function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
write
|
@Override
public int write(ByteBuffer src) throws IOException {
if (state == State.CLOSING)
throw closingException();
if (!ready())
return 0;
int written = 0;
while (flush(netWriteBuffer) && src.hasRemaining()) {
netWriteBuffer.clear();
SSLEngineResult wrapResult = sslEngine.wrap(src, netWriteBuffer);
netWriteBuffer.flip();
// reject renegotiation if TLS < 1.3, key updates for TLS 1.3 are allowed
if (wrapResult.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING &&
wrapResult.getStatus() == Status.OK &&
!sslEngine.getSession().getProtocol().equals(TLS13)) {
throw renegotiationException();
}
if (wrapResult.getStatus() == Status.OK) {
written += wrapResult.bytesConsumed();
} else if (wrapResult.getStatus() == Status.BUFFER_OVERFLOW) {
// BUFFER_OVERFLOW means that the last `wrap` call had no effect, so we expand the buffer and try again
netWriteBuffer = Utils.ensureCapacity(netWriteBuffer, netWriteBufferSize());
netWriteBuffer.position(netWriteBuffer.limit());
} else if (wrapResult.getStatus() == Status.BUFFER_UNDERFLOW) {
throw new IllegalStateException("SSL BUFFER_UNDERFLOW during write");
} else if (wrapResult.getStatus() == Status.CLOSED) {
throw new EOFException();
}
}
return written;
}
|
Writes a sequence of bytes to this channel from the given buffer.
@param src The buffer from which bytes are to be retrieved
@return The number of bytes read from src, possibly zero, or -1 if the channel has reached end-of-stream
@throws IOException If some other I/O error occurs
|
java
|
clients/src/main/java/org/apache/kafka/common/network/SslTransportLayer.java
| 709
|
[
"src"
] | true
| 12
| 8.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_correctness_check
|
def _correctness_check(provider_package: str, class_name: str, provider_info: ProviderInfo) -> Any:
"""
Perform coherence check on provider classes.
For apache-airflow providers - it checks if it starts with appropriate package. For all providers
it tries to import the provider - checking that there are no exceptions during importing.
It logs appropriate warning in case it detects any problems.
:param provider_package: name of the provider package
:param class_name: name of the class to import
:return the class if the class is OK, None otherwise.
"""
if not _check_builtin_provider_prefix(provider_package, class_name):
return None
try:
imported_class = import_string(class_name)
except AirflowOptionalProviderFeatureException as e:
# When the provider class raises AirflowOptionalProviderFeatureException
# this is an expected case when only some classes in provider are
# available. We just log debug level here and print info message in logs so that
# the user is aware of it
log_optional_feature_disabled(class_name, e, provider_package)
return None
except ImportError as e:
if "No module named 'airflow.providers." in e.msg:
# handle cases where another provider is missing. This can only happen if
# there is an optional feature, so we log debug and print information about it
log_optional_feature_disabled(class_name, e, provider_package)
return None
for known_error in KNOWN_UNHANDLED_OPTIONAL_FEATURE_ERRORS:
# Until we convert all providers to use AirflowOptionalProviderFeatureException
# we assume any problem with importing another "provider" is because this is an
# optional feature, so we log debug and print information about it
if known_error[0] == provider_package and known_error[1] in e.msg:
log_optional_feature_disabled(class_name, e, provider_package)
return None
# But when we have no idea - we print warning to logs
log_import_warning(class_name, e, provider_package)
return None
except Exception as e:
log_import_warning(class_name, e, provider_package)
return None
return imported_class
|
Perform coherence check on provider classes.
For apache-airflow providers - it checks if it starts with appropriate package. For all providers
it tries to import the provider - checking that there are no exceptions during importing.
It logs appropriate warning in case it detects any problems.
:param provider_package: name of the provider package
:param class_name: name of the class to import
:return the class if the class is OK, None otherwise.
|
python
|
airflow-core/src/airflow/providers_manager.py
| 287
|
[
"provider_package",
"class_name",
"provider_info"
] |
Any
| true
| 6
| 7.2
|
apache/airflow
| 43,597
|
sphinx
| false
|
getAnnotation
|
@SuppressWarnings("unchecked")
public <A extends Annotation> @Nullable A getAnnotation(Class<A> type) {
for (Annotation annotation : this.annotations) {
if (type.isInstance(annotation)) {
return (A) annotation;
}
}
return null;
}
|
Return a single associated annotations that could affect binding.
@param <A> the annotation type
@param type annotation type
@return the associated annotation or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
| 112
|
[
"type"
] |
A
| true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
cancelJob
|
private void cancelJob() {
if (scheduler.get() != null) {
scheduler.get().remove(LIFECYCLE_JOB_NAME);
scheduledJob = null;
}
}
|
Records the provided error for the index in the error store and logs the error message at `ERROR` level if the error for the index
is different to what's already in the error store or if the same error was in the error store for a number of retries divible by
the provided signallingErrorRetryThreshold (i.e. we log to level `error` every signallingErrorRetryThreshold retries, if the error
stays the same)
This allows us to not spam the logs, but signal to the logs if DSL is not making progress.
|
java
|
modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java
| 1,556
|
[] |
void
| true
| 2
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
_set_order
|
def _set_order(X, y, order="C"):
"""Change the order of X and y if necessary.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
order : {None, 'C', 'F'}
If 'C', dense arrays are returned as C-ordered, sparse matrices in csr
format. If 'F', dense arrays are return as F-ordered, sparse matrices
in csc format.
Returns
-------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data with guaranteed order.
y : ndarray of shape (n_samples,)
Target values with guaranteed order.
"""
if order not in [None, "C", "F"]:
raise ValueError(
"Unknown value for order. Got {} instead of None, 'C' or 'F'.".format(order)
)
sparse_X = sparse.issparse(X)
sparse_y = sparse.issparse(y)
if order is not None:
sparse_format = "csc" if order == "F" else "csr"
if sparse_X:
X = X.asformat(sparse_format, copy=False)
else:
X = np.asarray(X, order=order)
if sparse_y:
y = y.asformat(sparse_format)
else:
y = np.asarray(y, order=order)
return X, y
|
Change the order of X and y if necessary.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
order : {None, 'C', 'F'}
If 'C', dense arrays are returned as C-ordered, sparse matrices in csr
format. If 'F', dense arrays are return as F-ordered, sparse matrices
in csc format.
Returns
-------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data with guaranteed order.
y : ndarray of shape (n_samples,)
Target values with guaranteed order.
|
python
|
sklearn/linear_model/_coordinate_descent.py
| 49
|
[
"X",
"y",
"order"
] | false
| 8
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
newConnections
|
private GraphConnections<N, V> newConnections() {
return isDirected()
? DirectedGraphConnections.<N, V>of(incidentEdgeOrder)
: UndirectedGraphConnections.<N, V>of(incidentEdgeOrder);
}
|
Adds {@code node} to the graph and returns the associated {@link GraphConnections}.
@throws IllegalStateException if {@code node} is already present
|
java
|
android/guava/src/com/google/common/graph/StandardMutableValueGraph.java
| 187
|
[] | true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
validateDurationByUnits
|
function validateDurationByUnits(durationString: string, timeUnits: string[]): boolean {
for (const value of durationString.trim().split(' ')) {
const match = value.match(/([0-9]*[.]?[0-9]+)(.+)/);
if (match === null || match.length !== 3) {
return false;
}
const isValidUnit = timeUnits.includes(match[2]);
if (!isValidUnit) {
return false;
}
}
return true;
}
|
isValidGrafanaDuration returns `true` if the given string can be parsed into a valid Duration object based on
the Grafana SDK's gtime.parseDuration, `false` otherwise.
Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d", "w", "M", "y".
@see https://pkg.go.dev/github.com/grafana/grafana-plugin-sdk-go/backend/gtime#ParseDuration
@param durationString - string representation of a duration
@internal
|
typescript
|
packages/grafana-data/src/datetime/durationutil.ts
| 163
|
[
"durationString",
"timeUnits"
] | true
| 4
| 6.24
|
grafana/grafana
| 71,362
|
jsdoc
| false
|
|
beanOfType
|
public static <T> T beanOfType(ListableBeanFactory lbf, Class<T> type) throws BeansException {
Assert.notNull(lbf, "ListableBeanFactory must not be null");
Map<String, T> beansOfType = lbf.getBeansOfType(type);
return uniqueBean(type, beansOfType);
}
|
Return a single bean of the given type or subtypes, not looking in ancestor
factories. Useful convenience method when we expect a single bean and
don't care about the bean name.
<p>Does consider objects created by FactoryBeans, which means that FactoryBeans
will get initialized. If the object created by the FactoryBean doesn't match,
the raw FactoryBean itself will be matched against the type.
<p>This version of {@code beanOfType} automatically includes
prototypes and FactoryBeans.
@param lbf the bean factory
@param type the type of bean to match
@return the matching bean instance
@throws NoSuchBeanDefinitionException if no bean of the given type was found
@throws NoUniqueBeanDefinitionException if more than one bean of the given type was found
@throws BeansException if the bean could not be created
@see ListableBeanFactory#getBeansOfType(Class)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanFactoryUtils.java
| 472
|
[
"lbf",
"type"
] |
T
| true
| 1
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
leastOf
|
@SuppressWarnings("EmptyList") // ImmutableList doesn't support nullable element types
public <E extends T> List<E> leastOf(Iterator<E> iterator, int k) {
checkNotNull(iterator);
checkNonnegative(k, "k");
if (k == 0 || !iterator.hasNext()) {
return emptyList();
} else if (k >= Integer.MAX_VALUE / 2) {
// k is really large; just do a straightforward sorted-copy-and-sublist
ArrayList<E> list = Lists.newArrayList(iterator);
sort(list, this);
if (list.size() > k) {
list.subList(k, list.size()).clear();
}
list.trimToSize();
return unmodifiableList(list);
} else {
TopKSelector<E> selector = TopKSelector.least(k, this);
selector.offerAll(iterator);
return selector.topK();
}
}
|
Returns the {@code k} least elements from the given iterator according to this ordering, in
order from least to greatest. If there are fewer than {@code k} elements present, all will be
included.
<p>The implementation does not necessarily use a <i>stable</i> sorting algorithm; when multiple
elements are equivalent, it is undefined which will come first.
<p><b>Java 8+ users:</b> Use {@code Streams.stream(iterator).collect(Comparators.least(k,
thisComparator))} instead.
@return an immutable {@code RandomAccess} list of the {@code k} least elements in ascending
order
@throws IllegalArgumentException if {@code k} is negative
@since 14.0
|
java
|
android/guava/src/com/google/common/collect/Ordering.java
| 781
|
[
"iterator",
"k"
] | true
| 5
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
fromType
|
public static ScramMechanism fromType(byte type) {
for (ScramMechanism scramMechanism : VALUES) {
if (scramMechanism.type == type) {
return scramMechanism;
}
}
return UNKNOWN;
}
|
@param type the type indicator
@return the instance corresponding to the given type indicator, otherwise {@link #UNKNOWN}
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ScramMechanism.java
| 44
|
[
"type"
] |
ScramMechanism
| true
| 2
| 7.12
|
apache/kafka
| 31,560
|
javadoc
| false
|
memberId
|
public String memberId() {
return memberId;
}
|
@return Member ID that is generated at startup and remains unchanged for the entire lifetime of the process.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 269
|
[] |
String
| true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
rot90
|
def rot90(m, k=1, axes=(0, 1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
This means for a 2D array with the default `k` and `axes`, the
rotation will be counterclockwise.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes : (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
Returns
-------
y : ndarray
A rotated view of `m`.
See Also
--------
flip : Reverse the order of elements in an array along the given axis.
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Notes
-----
``rot90(m, k=1, axes=(1,0))`` is the reverse of
``rot90(m, k=1, axes=(0,1))``
``rot90(m, k=1, axes=(1,0))`` is equivalent to
``rot90(m, k=-1, axes=(0,1))``
Examples
--------
>>> import numpy as np
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]])
"""
axes = tuple(axes)
if len(axes) != 2:
raise ValueError("len(axes) must be 2.")
m = asanyarray(m)
if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:
raise ValueError("Axes must be different.")
if (axes[0] >= m.ndim or axes[0] < -m.ndim
or axes[1] >= m.ndim or axes[1] < -m.ndim):
raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.")
k %= 4
if k == 0:
return m[:]
if k == 2:
return flip(flip(m, axes[0]), axes[1])
axes_list = arange(0, m.ndim)
(axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
axes_list[axes[0]])
if k == 1:
return transpose(flip(m, axes[1]), axes_list)
else:
# k == 3
return flip(transpose(m, axes_list), axes[1])
|
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
This means for a 2D array with the default `k` and `axes`, the
rotation will be counterclockwise.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes : (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
Returns
-------
y : ndarray
A rotated view of `m`.
See Also
--------
flip : Reverse the order of elements in an array along the given axis.
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Notes
-----
``rot90(m, k=1, axes=(1,0))`` is the reverse of
``rot90(m, k=1, axes=(0,1))``
``rot90(m, k=1, axes=(1,0))`` is equivalent to
``rot90(m, k=-1, axes=(0,1))``
Examples
--------
>>> import numpy as np
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]])
|
python
|
numpy/lib/_function_base_impl.py
| 179
|
[
"m",
"k",
"axes"
] | false
| 12
| 7.76
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
emptyTopicList
|
public boolean emptyTopicList() {
return data.topics().isEmpty();
}
|
@return Builder for metadata request using topic IDs.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java
| 103
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
longValue
|
@Override
public long longValue() {
return value;
}
|
Returns the value of this MutableByte as a long.
@return the numeric value represented by this object after conversion to type long.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableByte.java
| 322
|
[] | true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
logical_op
|
def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike:
"""
Evaluate a logical operation `|`, `&`, or `^`.
Parameters
----------
left : np.ndarray or ExtensionArray
right : object
Cannot be a DataFrame, Series, or Index.
op : {operator.and_, operator.or_, operator.xor}
Or one of the reversed variants from roperator.
Returns
-------
ndarray or ExtensionArray
"""
def fill_bool(x, left=None):
# if `left` is specifically not-boolean, we do not cast to bool
if x.dtype.kind in "cfO":
# dtypes that can hold NA
mask = isna(x)
if mask.any():
x = x.astype(object)
x[mask] = False
if left is None or left.dtype.kind == "b":
x = x.astype(bool)
return x
right = lib.item_from_zerodim(right)
if is_list_like(right) and not hasattr(right, "dtype"):
# e.g. list, tuple
raise TypeError(
# GH#52264
"Logical ops (and, or, xor) between Pandas objects and dtype-less "
"sequences (e.g. list, tuple) are no longer supported. "
"Wrap the object in a Series, Index, or np.array "
"before operating instead.",
)
# NB: We assume extract_array has already been called on left and right
lvalues = ensure_wrapped_if_datetimelike(left)
rvalues = right
if should_extension_dispatch(lvalues, rvalues):
# Call the method on lvalues
res_values = op(lvalues, rvalues)
else:
if isinstance(rvalues, np.ndarray):
is_other_int_dtype = rvalues.dtype.kind in "iu"
if not is_other_int_dtype:
rvalues = fill_bool(rvalues, lvalues)
else:
# i.e. scalar
is_other_int_dtype = lib.is_integer(rvalues)
res_values = na_logical_op(lvalues, rvalues, op)
# For int vs int `^`, `|`, `&` are bitwise operators and return
# integer dtypes. Otherwise these are boolean ops
if not (left.dtype.kind in "iu" and is_other_int_dtype):
res_values = fill_bool(res_values)
return res_values
|
Evaluate a logical operation `|`, `&`, or `^`.
Parameters
----------
left : np.ndarray or ExtensionArray
right : object
Cannot be a DataFrame, Series, or Index.
op : {operator.and_, operator.or_, operator.xor}
Or one of the reversed variants from roperator.
Returns
-------
ndarray or ExtensionArray
|
python
|
pandas/core/ops/array_ops.py
| 400
|
[
"left",
"right",
"op"
] |
ArrayLike
| true
| 14
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
read
|
@Override
@CanIgnoreReturnValue
public int read() throws IOException {
int b = in.read();
if (b != -1) {
hasher.putByte((byte) b);
}
return b;
}
|
Reads the next byte of data from the underlying input stream and updates the hasher with the
byte read.
|
java
|
android/guava/src/com/google/common/hash/HashingInputStream.java
| 50
|
[] | true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
validate_integer
|
def validate_integer(
name: str, val: int | float | None, min_val: int = 0
) -> int | None:
"""
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : str
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
"""
if val is None:
return val
msg = f"'{name:s}' must be an integer >={min_val:d}"
if is_float(val):
if int(val) != val:
raise ValueError(msg)
val = int(val)
elif not (is_integer(val) and val >= min_val):
raise ValueError(msg)
return int(val)
|
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.
Parameters
----------
name : str
Parameter name (used for error reporting)
val : int or float
The value to check
min_val : int
Minimum allowed value (val < min_val will result in a ValueError)
|
python
|
pandas/io/parsers/readers.py
| 202
|
[
"name",
"val",
"min_val"
] |
int | None
| true
| 6
| 6.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
addCopies
|
@CanIgnoreReturnValue
public Builder<E> addCopies(E element, int occurrences) {
requireNonNull(contents); // see the comment on the field
if (occurrences == 0) {
return this;
}
if (buildInvoked) {
contents = new ObjectCountHashMap<E>(contents);
isLinkedHash = false;
}
buildInvoked = false;
checkNotNull(element);
contents.put(element, occurrences + contents.get(element));
return this;
}
|
Adds a number of occurrences of an element to this {@code ImmutableMultiset}.
@param element the element to add
@param occurrences the number of occurrences of the element to add. May be zero, in which
case no change will be made.
@return this {@code Builder} object
@throws NullPointerException if {@code element} is null
@throws IllegalArgumentException if {@code occurrences} is negative, or if this operation
would result in more than {@link Integer#MAX_VALUE} occurrences of the element
|
java
|
android/guava/src/com/google/common/collect/ImmutableMultiset.java
| 543
|
[
"element",
"occurrences"
] | true
| 3
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
_groupby_op
|
def _groupby_op(
self,
*,
how: str,
has_dropped_na: bool,
min_count: int,
ngroups: int,
ids: npt.NDArray[np.intp],
**kwargs,
) -> ArrayLike:
"""
Dispatch GroupBy reduction or transformation operation.
This is an *experimental* API to allow ExtensionArray authors to implement
reductions and transformations. The API is subject to change.
Parameters
----------
how : {'any', 'all', 'sum', 'prod', 'min', 'max', 'mean', 'median',
'median', 'var', 'std', 'sem', 'nth', 'last', 'ohlc',
'cumprod', 'cumsum', 'cummin', 'cummax', 'rank'}
has_dropped_na : bool
min_count : int
ngroups : int
ids : np.ndarray[np.intp]
ids[i] gives the integer label for the group that self[i] belongs to.
**kwargs : operation-specific
'any', 'all' -> ['skipna']
'var', 'std', 'sem' -> ['ddof']
'cumprod', 'cumsum', 'cummin', 'cummax' -> ['skipna']
'rank' -> ['ties_method', 'ascending', 'na_option', 'pct']
Returns
-------
np.ndarray or ExtensionArray
"""
from pandas.core.arrays.string_ import StringDtype
from pandas.core.groupby.ops import WrappedCythonOp
kind = WrappedCythonOp.get_kind_from_how(how)
op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
initial: Any = 0
# GH#43682
if isinstance(self.dtype, StringDtype):
# StringArray
if op.how in [
"prod",
"mean",
"median",
"cumsum",
"cumprod",
"std",
"sem",
"var",
"skew",
"kurt",
]:
raise TypeError(
f"dtype '{self.dtype}' does not support operation '{how}'"
)
if op.how not in ["any", "all"]:
# Fail early to avoid conversion to object
op._get_cython_function(op.kind, op.how, np.dtype(object), False)
arr = self
if op.how == "sum":
initial = ""
# https://github.com/pandas-dev/pandas/issues/60229
# All NA should result in the empty string.
assert "skipna" in kwargs
if kwargs["skipna"] and min_count == 0:
arr = arr.fillna("")
npvalues = arr.to_numpy(object, na_value=np.nan)
else:
raise NotImplementedError(
f"function is not implemented for this dtype: {self.dtype}"
)
res_values = op._cython_op_ndim_compat(
npvalues,
min_count=min_count,
ngroups=ngroups,
comp_ids=ids,
mask=None,
initial=initial,
**kwargs,
)
if op.how in op.cast_blocklist:
# i.e. how in ["rank"], since other cast_blocklist methods don't go
# through cython_operation
return res_values
if isinstance(self.dtype, StringDtype):
dtype = self.dtype
string_array_cls = dtype.construct_array_type()
return string_array_cls._from_sequence(res_values, dtype=dtype)
else:
raise NotImplementedError
|
Dispatch GroupBy reduction or transformation operation.
This is an *experimental* API to allow ExtensionArray authors to implement
reductions and transformations. The API is subject to change.
Parameters
----------
how : {'any', 'all', 'sum', 'prod', 'min', 'max', 'mean', 'median',
'median', 'var', 'std', 'sem', 'nth', 'last', 'ohlc',
'cumprod', 'cumsum', 'cummin', 'cummax', 'rank'}
has_dropped_na : bool
min_count : int
ngroups : int
ids : np.ndarray[np.intp]
ids[i] gives the integer label for the group that self[i] belongs to.
**kwargs : operation-specific
'any', 'all' -> ['skipna']
'var', 'std', 'sem' -> ['ddof']
'cumprod', 'cumsum', 'cummin', 'cummax' -> ['skipna']
'rank' -> ['ties_method', 'ascending', 'na_option', 'pct']
Returns
-------
np.ndarray or ExtensionArray
|
python
|
pandas/core/arrays/base.py
| 2,678
|
[
"self",
"how",
"has_dropped_na",
"min_count",
"ngroups",
"ids"
] |
ArrayLike
| true
| 11
| 6.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
release
|
private void release() {
if (refCount.decrementAndGet() == 0)
currentThread.set(NO_CURRENT_THREAD);
}
|
Release the light lock protecting the consumer from multithreaded access.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
| 2,102
|
[] |
void
| true
| 2
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
toString
|
@Override
public String toString() {
return (this.origin != null) ? this.origin.toString()
: "\"" + this.propertyName + "\" from property source \"" + this.propertySource.getName() + "\"";
}
|
Return the actual origin for the source if known.
@return the actual source origin
@since 3.2.8
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/origin/PropertySourceOrigin.java
| 94
|
[] |
String
| true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
zeroIfNull
|
public static Duration zeroIfNull(final Duration duration) {
return ObjectUtils.getIfNull(duration, Duration.ZERO);
}
|
Returns the given non-null value or {@link Duration#ZERO} if null.
@param duration The duration to test.
@return The given duration or {@link Duration#ZERO}.
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationUtils.java
| 264
|
[
"duration"
] |
Duration
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
shouldAppearInPrimaryNavBarMenu
|
function shouldAppearInPrimaryNavBarMenu(item: NavigationBarNode): boolean {
// Items with children should always appear in the primary navbar menu.
if (item.children) {
return true;
}
// Some nodes are otherwise important enough to always include in the primary navigation menu.
switch (navigationBarNodeKind(item)) {
case SyntaxKind.ClassDeclaration:
case SyntaxKind.ClassExpression:
case SyntaxKind.EnumDeclaration:
case SyntaxKind.InterfaceDeclaration:
case SyntaxKind.ModuleDeclaration:
case SyntaxKind.SourceFile:
case SyntaxKind.TypeAliasDeclaration:
case SyntaxKind.JSDocTypedefTag:
case SyntaxKind.JSDocCallbackTag:
return true;
case SyntaxKind.ArrowFunction:
case SyntaxKind.FunctionDeclaration:
case SyntaxKind.FunctionExpression:
return isTopLevelFunctionDeclaration(item);
default:
return false;
}
function isTopLevelFunctionDeclaration(item: NavigationBarNode): boolean {
if (!(item.node as FunctionDeclaration).body) {
return false;
}
switch (navigationBarNodeKind(item.parent!)) {
case SyntaxKind.ModuleBlock:
case SyntaxKind.SourceFile:
case SyntaxKind.MethodDeclaration:
case SyntaxKind.Constructor:
return true;
default:
return false;
}
}
}
|
Determines if a node should appear in the primary navbar menu.
|
typescript
|
src/services/navigationBar.ts
| 897
|
[
"item"
] | true
| 3
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
visitSourceFile
|
function visitSourceFile(node: SourceFile): SourceFile {
const ancestorFacts = enterSubtree(
HierarchyFacts.SourceFileExcludes,
isEffectiveStrictModeSourceFile(node, compilerOptions) ?
HierarchyFacts.StrictModeSourceFileIncludes :
HierarchyFacts.SourceFileIncludes,
);
exportedVariableStatement = false;
const visited = visitEachChild(node, visitor, context);
const statement = concatenate(
visited.statements,
taggedTemplateStringDeclarations && [
factory.createVariableStatement(/*modifiers*/ undefined, factory.createVariableDeclarationList(taggedTemplateStringDeclarations)),
],
);
const result = factory.updateSourceFile(visited, setTextRange(factory.createNodeArray(statement), node.statements));
exitSubtree(ancestorFacts);
return result;
}
|
@param expressionResultIsUnused Indicates the result of an expression is unused by the parent node (i.e., the left side of a comma or the
expression of an `ExpressionStatement`).
|
typescript
|
src/compiler/transformers/es2018.ts
| 563
|
[
"node"
] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
clientChannelBuilder
|
public static ChannelBuilder clientChannelBuilder(
SecurityProtocol securityProtocol,
JaasContext.Type contextType,
AbstractConfig config,
ListenerName listenerName,
String clientSaslMechanism,
Time time,
LogContext logContext) {
if (securityProtocol == SecurityProtocol.SASL_PLAINTEXT || securityProtocol == SecurityProtocol.SASL_SSL) {
if (contextType == null)
throw new IllegalArgumentException("`contextType` must be non-null if `securityProtocol` is `" + securityProtocol + "`");
if (clientSaslMechanism == null)
throw new IllegalArgumentException("`clientSaslMechanism` must be non-null in client mode if `securityProtocol` is `" + securityProtocol + "`");
}
return create(securityProtocol, ConnectionMode.CLIENT, contextType, config, listenerName, false, clientSaslMechanism,
null, null, time, logContext, null);
}
|
@param securityProtocol the securityProtocol
@param contextType the contextType, it must be non-null if `securityProtocol` is SASL_*; it is ignored otherwise
@param config client config
@param listenerName the listenerName if contextType is SERVER or null otherwise
@param clientSaslMechanism SASL mechanism if mode is CLIENT, ignored otherwise
@param time the time instance
@param logContext the log context instance
@return the configured `ChannelBuilder`
@throws IllegalArgumentException if `mode` invariants described above is not maintained
|
java
|
clients/src/main/java/org/apache/kafka/common/network/ChannelBuilders.java
| 64
|
[
"securityProtocol",
"contextType",
"config",
"listenerName",
"clientSaslMechanism",
"time",
"logContext"
] |
ChannelBuilder
| true
| 5
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
create
|
private static NestedLocation create(String location) {
int index = location.lastIndexOf("/!");
String locationPath = (index != -1) ? location.substring(0, index) : location;
String nestedEntryName = (index != -1) ? location.substring(index + 2) : null;
return new NestedLocation((!locationPath.isEmpty()) ? asPath(locationPath) : null, nestedEntryName);
}
|
Create a new {@link NestedLocation} from the given URI.
@param uri the nested URI
@return a new {@link NestedLocation} instance
@throws IllegalArgumentException if the URI is not valid
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/nested/NestedLocation.java
| 101
|
[
"location"
] |
NestedLocation
| true
| 4
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
builder
|
public static XContentBuilder builder(XContent xContent) throws IOException {
return new XContentBuilder(xContent, new ByteArrayOutputStream());
}
|
Create a new {@link XContentBuilder} using the given {@link XContent} content.
<p>
The builder uses an internal {@link ByteArrayOutputStream} output stream to build the content.
</p>
@param xContent the {@link XContent}
@return a new {@link XContentBuilder}
@throws IOException if an {@link IOException} occurs while building the content
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 60
|
[
"xContent"
] |
XContentBuilder
| true
| 1
| 6.16
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
predict
|
def predict(self, X):
"""
Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
# all dtypes should be the same, so just take the first
class_type = self.classes_[0].dtype
predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[k], axis=1), axis=0
)
return predictions
|
Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes.
|
python
|
sklearn/ensemble/_forest.py
| 882
|
[
"self",
"X"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
toCodePoints
|
public static int[] toCodePoints(final CharSequence cs) {
if (cs == null) {
return null;
}
if (cs.length() == 0) {
return ArrayUtils.EMPTY_INT_ARRAY;
}
return cs.toString().codePoints().toArray();
}
|
Converts a {@link CharSequence} into an array of code points.
<p>
Valid pairs of surrogate code units will be converted into a single supplementary code point. Isolated surrogate code units (i.e. a high surrogate not
followed by a low surrogate or a low surrogate not preceded by a high surrogate) will be returned as-is.
</p>
<pre>
StringUtils.toCodePoints(null) = null
StringUtils.toCodePoints("") = [] // empty array
</pre>
@param cs the character sequence to convert.
@return an array of code points.
@since 3.6
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 8,641
|
[
"cs"
] | true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
key_exist
|
def key_exist(self, bucket_name: str | None, key: str) -> bool:
"""
Find out whether the specified key exists in the oss remote storage.
:param bucket_name: the name of the bucket
:param key: oss bucket key
"""
# full_path = None
self.log.info("Looking up oss bucket %s for bucket key %s ...", bucket_name, key)
try:
return self.get_bucket(bucket_name).object_exists(key)
except Exception as e:
self.log.error(e)
raise AirflowException(f"Errors when check bucket object existence: {key}")
|
Find out whether the specified key exists in the oss remote storage.
:param bucket_name: the name of the bucket
:param key: oss bucket key
|
python
|
providers/alibaba/src/airflow/providers/alibaba/cloud/hooks/oss.py
| 322
|
[
"self",
"bucket_name",
"key"
] |
bool
| true
| 1
| 6
|
apache/airflow
| 43,597
|
sphinx
| false
|
get
|
@ParametricNullness
public static <T extends @Nullable Object> T get(
Iterable<? extends T> iterable, int position, @ParametricNullness T defaultValue) {
checkNotNull(iterable);
Iterators.checkNonnegative(position);
if (iterable instanceof List) {
List<? extends T> list = (List<? extends T>) iterable;
return (position < list.size()) ? list.get(position) : defaultValue;
} else {
Iterator<? extends T> iterator = iterable.iterator();
Iterators.advance(iterator, position);
return Iterators.getNext(iterator, defaultValue);
}
}
|
Returns the element at the specified position in an iterable or a default value otherwise.
<p><b>{@code Stream} equivalent:</b> {@code
stream.skip(position).findFirst().orElse(defaultValue)} (returns the default value if the index
is out of bounds)
@param position position of the element to return
@param defaultValue the default value to return if {@code position} is greater than or equal to
the size of the iterable
@return the element at the specified position in {@code iterable} or {@code defaultValue} if
{@code iterable} contains fewer than {@code position + 1} elements.
@throws IndexOutOfBoundsException if {@code position} is negative
@since 4.0
|
java
|
android/guava/src/com/google/common/collect/Iterables.java
| 799
|
[
"iterable",
"position",
"defaultValue"
] |
T
| true
| 3
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
copyReaderToBuilder
|
@CanIgnoreReturnValue
static long copyReaderToBuilder(Reader from, StringBuilder to) throws IOException {
checkNotNull(from);
checkNotNull(to);
char[] buf = new char[DEFAULT_BUF_SIZE];
int nRead;
long total = 0;
while ((nRead = from.read(buf)) != -1) {
to.append(buf, 0, nRead);
total += nRead;
}
return total;
}
|
Copies all characters between the {@link Reader} and {@link StringBuilder} objects. Does not
close or flush the reader.
<p>This is identical to {@link #copy(Readable, Appendable)} but optimized for these specific
types. CharBuffer has poor performance when being written into or read out of so round tripping
all the bytes through the buffer takes a long time. With these specialized types we can just
use a char array.
@param from the object to read from
@param to the object to write to
@return the number of characters copied
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/CharStreams.java
| 109
|
[
"from",
"to"
] | true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
getCoercedIPv4Address
|
public static Inet4Address getCoercedIPv4Address(InetAddress ip) {
if (ip instanceof Inet4Address) {
return (Inet4Address) ip;
}
// Special cases:
byte[] bytes = ip.getAddress();
boolean leadingBytesOfZero = true;
for (int i = 0; i < 15; ++i) {
if (bytes[i] != 0) {
leadingBytesOfZero = false;
break;
}
}
if (leadingBytesOfZero && (bytes[15] == 1)) {
return LOOPBACK4; // ::1
} else if (leadingBytesOfZero && (bytes[15] == 0)) {
return ANY4; // ::0
}
Inet6Address ip6 = (Inet6Address) ip;
long addressAsLong = 0;
if (hasEmbeddedIPv4ClientAddress(ip6)) {
addressAsLong = getEmbeddedIPv4ClientAddress(ip6).hashCode();
} else {
// Just extract the high 64 bits (assuming the rest is user-modifiable).
addressAsLong = ByteBuffer.wrap(ip6.getAddress(), 0, 8).getLong();
}
// Many strategies for hashing are possible. This might suffice for now.
int coercedHash = Hashing.murmur3_32_fixed().hashLong(addressAsLong).asInt();
// Squash into 224/4 Multicast and 240/4 Reserved space (i.e. 224/3).
coercedHash |= 0xe0000000;
// Fixup to avoid some "illegal" values. Currently the only potential
// illegal value is 255.255.255.255.
if (coercedHash == 0xffffffff) {
coercedHash = 0xfffffffe;
}
return getInet4Address(Ints.toByteArray(coercedHash));
}
|
Coerces an IPv6 address into an IPv4 address.
<p>HACK: As long as applications continue to use IPv4 addresses for indexing into tables,
accounting, et cetera, it may be necessary to <b>coerce</b> IPv6 addresses into IPv4 addresses.
This method does so by hashing 64 bits of the IPv6 address into {@code 224.0.0.0/3} (64 bits
into 29 bits):
<ul>
<li>If the IPv6 address contains an embedded IPv4 address, the function hashes that.
<li>Otherwise, it hashes the upper 64 bits of the IPv6 address.
</ul>
<p>A "coerced" IPv4 address is equivalent to itself.
<p>NOTE: This method is failsafe for security purposes: ALL IPv6 addresses (except localhost
(::1)) are hashed to avoid the security risk associated with extracting an embedded IPv4
address that might permit elevated privileges.
@param ip {@link InetAddress} to "coerce"
@return {@link Inet4Address} represented "coerced" address
@since 7.0
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 997
|
[
"ip"
] |
Inet4Address
| true
| 10
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
stripToEmpty
|
public static String stripToEmpty(final String str) {
return str == null ? EMPTY : strip(str, null);
}
|
Strips whitespace from the start and end of a String returning an empty String if {@code null} input.
<p>
This is similar to {@link #trimToEmpty(String)} but removes whitespace. Whitespace is defined by {@link Character#isWhitespace(char)}.
</p>
<pre>
StringUtils.stripToEmpty(null) = ""
StringUtils.stripToEmpty("") = ""
StringUtils.stripToEmpty(" ") = ""
StringUtils.stripToEmpty("abc") = "abc"
StringUtils.stripToEmpty(" abc") = "abc"
StringUtils.stripToEmpty("abc ") = "abc"
StringUtils.stripToEmpty(" abc ") = "abc"
StringUtils.stripToEmpty(" ab c ") = "ab c"
</pre>
@param str the String to be stripped, may be null.
@return the trimmed String, or an empty String if {@code null} input.
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 8,040
|
[
"str"
] |
String
| true
| 2
| 7.68
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
process
|
private void process(final ResumePartitionsEvent event) {
try {
Collection<TopicPartition> partitions = event.partitions();
log.debug("Resuming partitions {}", partitions);
for (TopicPartition partition : partitions) {
subscriptions.resume(partition);
}
event.future().complete(null);
} catch (Exception e) {
event.future().completeExceptionally(e);
}
}
|
Process event indicating whether the AcknowledgeCommitCallbackHandler is configured by the user.
@param event Event containing a boolean to indicate if the callback handler is configured or not.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
| 636
|
[
"event"
] |
void
| true
| 2
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
removeAdvice
|
boolean removeAdvice(Advice advice);
|
Remove the Advisor containing the given advice.
@param advice the advice to remove
@return {@code true} of the advice was found and removed;
{@code false} if there was no such advice
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/Advised.java
| 216
|
[
"advice"
] | true
| 1
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
registerStateListener
|
public void registerStateListener(MemberStateListener listener) {
if (listener == null) {
throw new IllegalArgumentException("State updates listener cannot be null");
}
this.stateUpdatesListeners.add(listener);
}
|
Register a new listener that will be invoked whenever the member state changes, or a new
member ID or epoch is received.
@param listener Listener to invoke.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 1,385
|
[
"listener"
] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
removeBrackets
|
function removeBrackets(key) {
return utils.endsWith(key, '[]') ? key.slice(0, -2) : key;
}
|
It removes the brackets from the end of a string
@param {string} key - The key of the parameter.
@returns {string} the key without the brackets.
|
javascript
|
lib/helpers/toFormData.js
| 26
|
[
"key"
] | false
| 2
| 6
|
axios/axios
| 108,381
|
jsdoc
| false
|
|
asFailableFunction
|
@SuppressWarnings("unchecked")
public static <T, R> FailableFunction<T, R, Throwable> asFailableFunction(final Method method) {
return asInterfaceInstance(FailableFunction.class, method);
}
|
Produces a {@link FailableFunction} for a given a <em>supplier</em> Method. You call the Function with one argument:
the object receiving the method call. The FailableFunction return type must match the method's return type.
@param <T> the type of the first argument to the function: The type containing the method.
@param <R> the type of the result of the function: The method return type.
@param method the method to invoke.
@return a correctly-typed wrapper for the given target.
|
java
|
src/main/java/org/apache/commons/lang3/function/MethodInvokers.java
| 153
|
[
"method"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
read_key
|
def read_key(self, key: str, bucket_name: str | None = None) -> str:
"""
Read a key from S3.
.. seealso::
- :external+boto3:py:meth:`S3.Object.get`
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which the file is stored
:return: the content of the key
"""
obj = self.get_key(key, bucket_name)
return obj.get()["Body"].read().decode("utf-8")
|
Read a key from S3.
.. seealso::
- :external+boto3:py:meth:`S3.Object.get`
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which the file is stored
:return: the content of the key
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 1,068
|
[
"self",
"key",
"bucket_name"
] |
str
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
root_mean_squared_log_error
|
def root_mean_squared_log_error(
y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
"""Root mean squared logarithmic error regression loss.
Read more in the :ref:`User Guide <mean_squared_log_error>`.
.. versionadded:: 1.4
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors when the input is of multioutput
format.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import root_mean_squared_log_error
>>> y_true = [3, 5, 2.5, 7]
>>> y_pred = [2.5, 5, 4, 8]
>>> root_mean_squared_log_error(y_true, y_pred)
0.199...
"""
xp, _ = get_namespace(y_true, y_pred)
_, y_true, y_pred, sample_weight, multioutput = (
_check_reg_targets_with_floating_dtype(
y_true, y_pred, sample_weight, multioutput, xp=xp
)
)
if xp.any(y_true <= -1) or xp.any(y_pred <= -1):
raise ValueError(
"Root Mean Squared Logarithmic Error cannot be used when "
"targets contain values less than or equal to -1."
)
return root_mean_squared_error(
xp.log1p(y_true),
xp.log1p(y_pred),
sample_weight=sample_weight,
multioutput=multioutput,
)
|
Root mean squared logarithmic error regression loss.
Read more in the :ref:`User Guide <mean_squared_log_error>`.
.. versionadded:: 1.4
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors when the input is of multioutput
format.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import root_mean_squared_log_error
>>> y_true = [3, 5, 2.5, 7]
>>> y_pred = [2.5, 5, 4, 8]
>>> root_mean_squared_log_error(y_true, y_pred)
0.199...
|
python
|
sklearn/metrics/_regression.py
| 785
|
[
"y_true",
"y_pred",
"sample_weight",
"multioutput"
] | false
| 3
| 7.12
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
serverAssignor
|
public Optional<String> serverAssignor() {
return this.serverAssignor;
}
|
@return Server-side assignor implementation configured for the member, that will be sent
out to the server to be used. If empty, then the server will select the assignor.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java
| 348
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
directFieldAsBase64
|
public XContentBuilder directFieldAsBase64(String name, CheckedConsumer<OutputStream, IOException> writer) throws IOException {
if (contentType() != XContentType.JSON) {
assert false : "directFieldAsBase64 supports only JSON format";
throw new UnsupportedOperationException("directFieldAsBase64 supports only JSON format");
}
generator.writeDirectField(name, os -> {
os.write('\"');
// We need to close the output stream that is wrapped by a Base64 encoder to flush the outstanding buffer
// of the encoder, but we must not close the underlying output stream of the XContentBuilder.
final OutputStream noClose = Streams.noCloseStream(os);
final OutputStream encodedOutput = Base64.getEncoder().wrap(noClose);
writer.accept(encodedOutput);
encodedOutput.close(); // close to flush the outstanding buffer used in the Base64 Encoder
os.write('\"');
});
return this;
}
|
Write the content that is written to the output stream by the {@code writer} as a string encoded in Base64 format.
This API can be used to generate XContent directly without the intermediate results to reduce memory usage.
Note that this method supports only JSON.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
| 1,255
|
[
"name",
"writer"
] |
XContentBuilder
| true
| 2
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
skipNulls
|
public Joiner skipNulls() {
return new Joiner(this) {
@Override
@SuppressWarnings("JoinIterableIterator") // suggests infinite recursion
public String join(Iterable<?> parts) {
return join(parts.iterator());
}
@Override
public <A extends Appendable> A appendTo(A appendable, Iterator<?> parts) throws IOException {
checkNotNull(appendable, "appendable");
checkNotNull(parts, "parts");
while (parts.hasNext()) {
Object part = parts.next();
if (part != null) {
appendable.append(Joiner.this.toString(part));
break;
}
}
while (parts.hasNext()) {
Object part = parts.next();
if (part != null) {
appendable.append(separator);
appendable.append(Joiner.this.toString(part));
}
}
return appendable;
}
@Override
public Joiner useForNull(String nullText) {
throw new UnsupportedOperationException("already specified skipNulls");
}
@Override
public MapJoiner withKeyValueSeparator(String kvs) {
throw new UnsupportedOperationException("can't use .skipNulls() with maps");
}
};
}
|
Returns a joiner with the same behavior as this joiner, except automatically skipping over any
provided null elements.
|
java
|
android/guava/src/com/google/common/base/Joiner.java
| 264
|
[] |
Joiner
| true
| 5
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
toString
|
@Override
public String toString() {
return "ConfigEntry(" +
"name=" + name +
", value=" + (isSensitive ? "Redacted" : value) +
", source=" + source +
", isSensitive=" + isSensitive +
", isReadOnly=" + isReadOnly +
", synonyms=" + synonyms +
", type=" + type +
", documentation=" + documentation +
")";
}
|
Override toString to redact sensitive value.
WARNING, user should be responsible to set the correct "isSensitive" field for each config entry.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ConfigEntry.java
| 182
|
[] |
String
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
initializeConfigFileSupport
|
function initializeConfigFileSupport() {
if (getOptionValue('--experimental-default-config-file') ||
getOptionValue('--experimental-config-file')) {
emitExperimentalWarning('--experimental-config-file');
}
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 383
|
[] | false
| 3
| 6.8
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
tryConsumeDeclare
|
function tryConsumeDeclare(): boolean {
let token = scanner.getToken();
if (token === SyntaxKind.DeclareKeyword) {
// declare module "mod"
token = nextToken();
if (token === SyntaxKind.ModuleKeyword) {
token = nextToken();
if (token === SyntaxKind.StringLiteral) {
recordAmbientExternalModule();
}
}
return true;
}
return false;
}
|
Returns true if at least one token was consumed from the stream
|
typescript
|
src/services/preProcess.ts
| 76
|
[] | true
| 4
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toInstance
|
public <R> R toInstance(Function<? super T, R> factory) {
Assert.notNull(factory, "'factory' must not be null");
T value = getValue();
if (value != null && test(value)) {
return factory.apply(value);
}
throw new NoSuchElementException("No value present");
}
|
Complete the mapping by creating a new instance from the non-filtered value.
@param <R> the resulting type
@param factory the factory used to create the instance
@return the instance
@throws NoSuchElementException if the value has been filtered
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/PropertyMapper.java
| 324
|
[
"factory"
] |
R
| true
| 3
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
createDateTimeFormatter
|
public DateTimeFormatter createDateTimeFormatter() {
return createDateTimeFormatter(DateTimeFormatter.ofLocalizedDateTime(FormatStyle.MEDIUM));
}
|
Create a new {@code DateTimeFormatter} using this factory.
<p>If no specific pattern or style has been defined,
{@link FormatStyle#MEDIUM medium date time format} will be used.
@return a new date time formatter
@see #createDateTimeFormatter(DateTimeFormatter)
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DateTimeFormatterFactory.java
| 163
|
[] |
DateTimeFormatter
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
equals
|
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (!(other instanceof Features)) {
return false;
}
final Features<?> that = (Features<?>) other;
return Objects.equals(this.features, that.features);
}
|
Converts from a map to Features<SupportedVersionRange>.
@param featuresMap the map representation of a Features<SupportedVersionRange> object,
generated using the toMap() API.
@return the Features<SupportedVersionRange> object
|
java
|
clients/src/main/java/org/apache/kafka/common/feature/Features.java
| 139
|
[
"other"
] | true
| 3
| 7.12
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
paired_manhattan_distances
|
def paired_manhattan_distances(X, Y):
"""Compute the paired L1 distances between X and Y.
Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), ...,
(X[n_samples], Y[n_samples]).
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
An array-like where each row is a sample and each column is a feature.
Y : {array-like, sparse matrix} of shape (n_samples, n_features)
An array-like where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
L1 paired distances between the row vectors of `X`
and the row vectors of `Y`.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_manhattan_distances
>>> import numpy as np
>>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> Y = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
>>> paired_manhattan_distances(X, Y)
array([1., 2., 1.])
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
|
Compute the paired L1 distances between X and Y.
Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), ...,
(X[n_samples], Y[n_samples]).
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
An array-like where each row is a sample and each column is a feature.
Y : {array-like, sparse matrix} of shape (n_samples, n_features)
An array-like where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
L1 paired distances between the row vectors of `X`
and the row vectors of `Y`.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_manhattan_distances
>>> import numpy as np
>>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> Y = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
>>> paired_manhattan_distances(X, Y)
array([1., 2., 1.])
|
python
|
sklearn/metrics/pairwise.py
| 1,222
|
[
"X",
"Y"
] | false
| 3
| 7.68
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
hasNameStartingWith
|
public boolean hasNameStartingWith(CharSequence prefix) {
String name = this.name;
if (name != null) {
return name.startsWith(prefix.toString());
}
long pos = getCentralDirectoryFileHeaderRecordPos(this.lookupIndex)
+ ZipCentralDirectoryFileHeaderRecord.FILE_NAME_OFFSET;
return ZipString.startsWith(null, ZipContent.this.data, pos, this.centralRecord.fileNameLength(),
prefix) != -1;
}
|
Returns {@code true} if this entry has a name starting with the given prefix.
@param prefix the required prefix
@return if the entry name starts with the prefix
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipContent.java
| 738
|
[
"prefix"
] | true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
setQuoteMatcher
|
public StrTokenizer setQuoteMatcher(final StrMatcher quote) {
if (quote != null) {
this.quoteMatcher = quote;
}
return this;
}
|
Sets the quote matcher to use.
<p>
The quote character is used to wrap data between the tokens.
This enables delimiters to be entered as data.
</p>
@param quote the quote matcher to use, null ignored.
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrTokenizer.java
| 1,021
|
[
"quote"
] |
StrTokenizer
| true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
generate_numba_table_func
|
def generate_numba_table_func(
func: Callable[..., np.ndarray],
nopython: bool,
nogil: bool,
parallel: bool,
):
"""
Generate a numba jitted function to apply window calculations table-wise.
Func will be passed an M window size x N number of columns array, and
must return a 1 x N number of columns array.
1. jit the user's function
2. Return a rolling apply function with the jitted function inline
Parameters
----------
func : function
function to be applied to each window and will be JITed
nopython : bool
nopython to be passed into numba.jit
nogil : bool
nogil to be passed into numba.jit
parallel : bool
parallel to be passed into numba.jit
Returns
-------
Numba function
"""
numba_func = jit_user_function(func)
if TYPE_CHECKING:
import numba
else:
numba = import_optional_dependency("numba")
@numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
def roll_table(
values: np.ndarray,
begin: np.ndarray,
end: np.ndarray,
minimum_periods: int,
*args: Any,
):
result = np.empty((len(begin), values.shape[1]))
min_periods_mask = np.empty(result.shape)
for i in numba.prange(len(result)):
start = begin[i]
stop = end[i]
window = values[start:stop]
count_nan = np.sum(np.isnan(window), axis=0)
nan_mask = len(window) - count_nan >= minimum_periods
if nan_mask.any():
result[i, :] = numba_func(window, *args)
min_periods_mask[i, :] = nan_mask
result = np.where(min_periods_mask, result, np.nan)
return result
return roll_table
|
Generate a numba jitted function to apply window calculations table-wise.
Func will be passed an M window size x N number of columns array, and
must return a 1 x N number of columns array.
1. jit the user's function
2. Return a rolling apply function with the jitted function inline
Parameters
----------
func : function
function to be applied to each window and will be JITed
nopython : bool
nopython to be passed into numba.jit
nogil : bool
nogil to be passed into numba.jit
parallel : bool
parallel to be passed into numba.jit
Returns
-------
Numba function
|
python
|
pandas/core/window/numba_.py
| 183
|
[
"func",
"nopython",
"nogil",
"parallel"
] | true
| 5
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
head_object
|
def head_object(self, key: str, bucket_name: str | None = None) -> dict | None:
"""
Retrieve metadata of an object.
.. seealso::
- :external+boto3:py:meth:`S3.Client.head_object`
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which the file is stored
:return: metadata of an object
"""
try:
params = {
"Bucket": bucket_name,
"Key": key,
}
if self._requester_pays:
params["RequestPayer"] = "requester"
return self.get_conn().head_object(**params)
except ClientError as e:
if e.response["ResponseMetadata"]["HTTPStatusCode"] == 404:
return None
raise e
|
Retrieve metadata of an object.
.. seealso::
- :external+boto3:py:meth:`S3.Client.head_object`
:param key: S3 key that will point to the file
:param bucket_name: Name of the bucket in which the file is stored
:return: metadata of an object
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
| 997
|
[
"self",
"key",
"bucket_name"
] |
dict | None
| true
| 3
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
commitAsync
|
public void commitAsync(
final Map<TopicIdPartition, NodeAcknowledgements> acknowledgementsMap,
final long deadlineMs) {
final Cluster cluster = metadata.fetch();
final ResultHandler resultHandler = new ResultHandler(Optional.empty());
sessionHandlers.forEach((nodeId, sessionHandler) -> {
Node node = cluster.nodeById(nodeId);
if (node != null) {
Map<TopicIdPartition, Acknowledgements> acknowledgementsMapForNode = new HashMap<>();
acknowledgeRequestStates.putIfAbsent(nodeId, new Tuple<>(null, null, null));
for (TopicIdPartition tip : sessionHandler.sessionPartitions()) {
NodeAcknowledgements nodeAcknowledgements = acknowledgementsMap.get(tip);
if ((nodeAcknowledgements != null) && (nodeAcknowledgements.nodeId() == node.id())) {
if (!isLeaderKnownToHaveChanged(node.id(), tip)) {
Acknowledgements acknowledgements = nodeAcknowledgements.acknowledgements();
acknowledgementsMapForNode.put(tip, acknowledgements);
metricsManager.recordAcknowledgementSent(acknowledgements.size());
log.debug("Added async acknowledge request for partition {} to node {}", tip.topicPartition(), node.id());
AcknowledgeRequestState asyncRequestState = acknowledgeRequestStates.get(nodeId).getAsyncRequest();
if (asyncRequestState == null) {
acknowledgeRequestStates.get(nodeId).setAsyncRequest(new AcknowledgeRequestState(logContext,
ShareConsumeRequestManager.class.getSimpleName() + ":2",
deadlineMs,
retryBackoffMs,
retryBackoffMaxMs,
sessionHandler,
nodeId,
acknowledgementsMapForNode,
resultHandler,
AcknowledgeRequestType.COMMIT_ASYNC
));
} else {
Acknowledgements prevAcks = asyncRequestState.acknowledgementsToSend.putIfAbsent(tip, acknowledgements);
if (prevAcks != null) {
asyncRequestState.acknowledgementsToSend.get(tip).merge(acknowledgements);
}
}
} else {
nodeAcknowledgements.acknowledgements().complete(Errors.NOT_LEADER_OR_FOLLOWER.exception());
maybeSendShareAcknowledgementEvent(Map.of(tip, nodeAcknowledgements.acknowledgements()), true, Optional.empty());
}
}
}
}
});
resultHandler.completeIfEmpty();
}
|
Enqueue an AcknowledgeRequestState to be picked up on the next poll.
@param acknowledgementsMap The acknowledgements to commit
@param deadlineMs Time until which the request will be retried if it fails with
an expected retriable error.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 599
|
[
"acknowledgementsMap",
"deadlineMs"
] |
void
| true
| 7
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
ensureTransactional
|
private void ensureTransactional() {
if (!isTransactional())
throw new IllegalStateException("Transactional method invoked on a non-transactional producer.");
}
|
Check if the transaction is in the prepared state.
@return true if the current state is PREPARED_TRANSACTION
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 1,149
|
[] |
void
| true
| 2
| 8.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
failure_message_from_response
|
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
"""
return response["jobRun"]["stateDetails"]
|
Get failure message from response dictionary.
:param response: response from AWS API
:return: failure message
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/emr.py
| 170
|
[
"response"
] |
str | None
| true
| 1
| 6.56
|
apache/airflow
| 43,597
|
sphinx
| false
|
_from_arrays
|
def _from_arrays(
cls,
arrays,
columns,
index,
dtype: Dtype | None = None,
verify_integrity: bool = True,
) -> Self:
"""
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
The rows labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
that all elements of `arrays` are actual arrays how they will be
stored in a block (numpy ndarray or ExtensionArray), have the same
length as and are aligned with the index, and that `columns` and
`index` are ensured to be an Index object.
Returns
-------
DataFrame
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
columns = ensure_index(columns)
if len(columns) != len(arrays):
raise ValueError("len(columns) must match len(arrays)")
mgr = arrays_to_mgr(
arrays,
columns,
index,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls._from_mgr(mgr, axes=mgr.axes)
|
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
The rows labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
that all elements of `arrays` are actual arrays how they will be
stored in a block (numpy ndarray or ExtensionArray), have the same
length as and are aligned with the index, and that `columns` and
`index` are ensured to be an Index object.
Returns
-------
DataFrame
|
python
|
pandas/core/frame.py
| 2,611
|
[
"cls",
"arrays",
"columns",
"index",
"dtype",
"verify_integrity"
] |
Self
| true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
fireSuccess
|
private void fireSuccess() {
T value = value();
while (true) {
RequestFutureListener<T> listener = listeners.poll();
if (listener == null)
break;
listener.onSuccess(value);
}
}
|
Raise an error. The request will be marked as failed.
@param error corresponding error to be passed to caller
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestFuture.java
| 163
|
[] |
void
| true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
forward
|
def forward(self, outputs, targets):
"""This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
# Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = torch.as_tensor(
[num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device
)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_boxes)
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if "aux_outputs" in outputs:
for i, aux_outputs in enumerate(outputs["aux_outputs"]):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
if loss == "masks":
# Intermediate masks losses are too costly to compute, we ignore them.
continue
kwargs = {}
if loss == "labels":
# Logging is enabled only for the last layer
kwargs = {"log": False}
l_dict = self.get_loss(
loss, aux_outputs, targets, indices, num_boxes, **kwargs
)
l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
|
This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
|
python
|
benchmarks/functional_autograd_benchmark/torchvision_models.py
| 820
|
[
"self",
"outputs",
"targets"
] | false
| 8
| 6.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
asList
|
public ImmutableList<E> asList() {
return isEmpty() ? ImmutableList.of() : ImmutableList.asImmutableList(toArray());
}
|
Returns an {@code ImmutableList} containing the same elements, in the same order, as this
collection.
<p><b>Performance note:</b> in most cases this method can return quickly without actually
copying anything. The exact circumstances under which the copy is performed are undefined and
subject to change.
@since 2.0
|
java
|
android/guava/src/com/google/common/collect/ImmutableCollection.java
| 354
|
[] | true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
nextInt
|
@Deprecated
public static int nextInt(final int startInclusive, final int endExclusive) {
return secure().randomInt(startInclusive, endExclusive);
}
|
Generates a random integer within the specified range.
@param startInclusive the smallest value that can be returned, must be non-negative.
@param endExclusive the upper bound (not included).
@throws IllegalArgumentException if {@code startInclusive > endExclusive} or if {@code startInclusive} is negative.
@return the random integer.
@deprecated Use {@link #secure()}, {@link #secureStrong()}, or {@link #insecure()}.
|
java
|
src/main/java/org/apache/commons/lang3/RandomUtils.java
| 207
|
[
"startInclusive",
"endExclusive"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getAndAdd
|
public byte getAndAdd(final byte operand) {
final byte last = value;
this.value += operand;
return last;
}
|
Increments this instance's value by {@code operand}; this method returns the value associated with the instance
immediately prior to the addition operation. This method is not thread safe.
@param operand the quantity to add, not null.
@return the value associated with this instance immediately before the operand was added.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableByte.java
| 217
|
[
"operand"
] | true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
copyConfigurationFrom
|
protected void copyConfigurationFrom(AdvisedSupport other, TargetSource targetSource, List<Advisor> advisors) {
copyFrom(other);
this.targetSource = targetSource;
this.advisorChainFactory = other.advisorChainFactory;
this.interfaces = new ArrayList<>(other.interfaces);
for (Advisor advisor : advisors) {
if (advisor instanceof IntroductionAdvisor introductionAdvisor) {
validateIntroductionAdvisor(introductionAdvisor);
}
Assert.notNull(advisor, "Advisor must not be null");
this.advisors.add(advisor);
}
adviceChanged();
}
|
Copy the AOP configuration from the given {@link AdvisedSupport} object,
but allow substitution of a fresh {@link TargetSource} and a given interceptor chain.
@param other the {@code AdvisedSupport} object to take proxy configuration from
@param targetSource the new TargetSource
@param advisors the Advisors for the chain
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 574
|
[
"other",
"targetSource",
"advisors"
] |
void
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
generate_numba_apply_func
|
def generate_numba_apply_func(
func: Callable[..., Scalar],
nopython: bool,
nogil: bool,
parallel: bool,
):
"""
Generate a numba jitted apply function specified by values from engine_kwargs.
1. jit the user's function
2. Return a rolling apply function with the jitted function inline
Configurations specified in engine_kwargs apply to both the user's
function _AND_ the rolling apply function.
Parameters
----------
func : function
function to be applied to each window and will be JITed
nopython : bool
nopython to be passed into numba.jit
nogil : bool
nogil to be passed into numba.jit
parallel : bool
parallel to be passed into numba.jit
Returns
-------
Numba function
"""
numba_func = jit_user_function(func)
if TYPE_CHECKING:
import numba
else:
numba = import_optional_dependency("numba")
@numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
def roll_apply(
values: np.ndarray,
begin: np.ndarray,
end: np.ndarray,
minimum_periods: int,
*args: Any,
) -> np.ndarray:
result = np.empty(len(begin))
for i in numba.prange(len(result)):
start = begin[i]
stop = end[i]
window = values[start:stop]
count_nan = np.sum(np.isnan(window))
if len(window) - count_nan >= minimum_periods:
result[i] = numba_func(window, *args)
else:
result[i] = np.nan
return result
return roll_apply
|
Generate a numba jitted apply function specified by values from engine_kwargs.
1. jit the user's function
2. Return a rolling apply function with the jitted function inline
Configurations specified in engine_kwargs apply to both the user's
function _AND_ the rolling apply function.
Parameters
----------
func : function
function to be applied to each window and will be JITed
nopython : bool
nopython to be passed into numba.jit
nogil : bool
nogil to be passed into numba.jit
parallel : bool
parallel to be passed into numba.jit
Returns
-------
Numba function
|
python
|
pandas/core/window/numba_.py
| 22
|
[
"func",
"nopython",
"nogil",
"parallel"
] | true
| 6
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
toString
|
@Override
public String toString() {
StringBuilder sb = new StringBuilder(64);
int i = 0;
for (ParseState.Entry entry : this.state) {
if (i > 0) {
sb.append('\n');
sb.append("\t".repeat(i));
sb.append("-> ");
}
sb.append(entry);
i++;
}
return sb.toString();
}
|
Returns a tree-style representation of the current {@code ParseState}.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/parsing/ParseState.java
| 93
|
[] |
String
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getPropertiesFromServices
|
private Properties getPropertiesFromServices(Environment environment, JsonParser parser) {
Properties properties = new Properties();
try {
String property = environment.getProperty("VCAP_SERVICES", "{}");
Map<String, Object> map = parser.parseMap(property);
extractPropertiesFromServices(properties, map);
}
catch (Exception ex) {
this.logger.error("Could not parse VCAP_SERVICES", ex);
}
return properties;
}
|
Create a new {@link CloudFoundryVcapEnvironmentPostProcessor} instance.
@param logFactory the log factory to use
@since 3.0.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/cloud/CloudFoundryVcapEnvironmentPostProcessor.java
| 156
|
[
"environment",
"parser"
] |
Properties
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getAsText
|
@Override
public String getAsText() {
Object value = getValue();
if (value == null) {
return "";
}
if (this.numberFormat != null) {
// Use NumberFormat for rendering value.
return this.numberFormat.format(value);
}
else {
// Use toString method for rendering value.
return value.toString();
}
}
|
Format the Number as String, using the specified NumberFormat.
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/CustomNumberEditor.java
| 135
|
[] |
String
| true
| 3
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
parse
|
public static JoinGroupRequest parse(Readable readable, short version) {
return new JoinGroupRequest(new JoinGroupRequestData(readable, version), version);
}
|
Get the client's join reason.
@param request The JoinGroupRequest.
@return The join reason.
|
java
|
clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java
| 210
|
[
"readable",
"version"
] |
JoinGroupRequest
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
subAndCheck
|
private static int subAndCheck(final int x, final int y) {
final long s = (long) x - (long) y;
if (s < Integer.MIN_VALUE || s > Integer.MAX_VALUE) {
throw new ArithmeticException("overflow: add");
}
return (int) s;
}
|
Subtracts two integers, checking for overflow.
@param x the minuend
@param y the subtrahend
@return the difference {@code x-y}
@throws ArithmeticException if the result cannot be represented as
an int
|
java
|
src/main/java/org/apache/commons/lang3/math/Fraction.java
| 449
|
[
"x",
"y"
] | true
| 3
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
logCacheError
|
protected void logCacheError(Supplier<String> messageSupplier, RuntimeException exception) {
if (getLogger().isWarnEnabled()) {
if (isLogStackTraces()) {
getLogger().warn(messageSupplier.get(), exception);
}
else {
getLogger().warn(messageSupplier.get());
}
}
}
|
Log the cache error message in the given supplier.
<p>If {@link #isLogStackTraces()} is {@code true}, the given
{@code exception} will be logged as well.
<p>The default implementation logs the message as a warning.
@param messageSupplier the message supplier
@param exception the exception thrown by the cache provider
@since 5.3.22
@see #isLogStackTraces()
@see #getLogger()
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/LoggingCacheErrorHandler.java
| 156
|
[
"messageSupplier",
"exception"
] |
void
| true
| 3
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
open_memmap
|
def open_memmap(filename, mode='r+', dtype=None, shape=None,
fortran_order=False, version=None, *,
max_header_size=_MAX_HEADER_SIZE):
"""
Open a .npy file as a memory-mapped array.
This may be used to read an existing file or create a new one.
Parameters
----------
filename : str or path-like
The name of the file on disk. This may *not* be a file-like
object.
mode : str, optional
The mode in which to open the file; the default is 'r+'. In
addition to the standard file modes, 'c' is also accepted to mean
"copy on write." See `memmap` for the available mode strings.
dtype : data-type, optional
The data type of the array if we are creating a new file in "write"
mode, if not, `dtype` is ignored. The default value is None, which
results in a data-type of `float64`.
shape : tuple of int
The shape of the array if we are creating a new file in "write"
mode, in which case this parameter is required. Otherwise, this
parameter is ignored and is thus optional.
fortran_order : bool, optional
Whether the array should be Fortran-contiguous (True) or
C-contiguous (False, the default) if we are creating a new file in
"write" mode.
version : tuple of int (major, minor) or None
If the mode is a "write" mode, then this is the version of the file
format used to create the file. None means use the oldest
supported version that is able to store the data. Default: None
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
See :py:func:`ast.literal_eval()` for details.
Returns
-------
marray : memmap
The memory-mapped array.
Raises
------
ValueError
If the data or the mode is invalid.
OSError
If the file is not found or cannot be opened correctly.
See Also
--------
numpy.memmap
"""
if isfileobj(filename):
raise ValueError("Filename must be a string or a path-like object."
" Memmap cannot use existing file handles.")
if 'w' in mode:
# We are creating the file, not reading it.
# Check if we ought to create the file.
_check_version(version)
# Ensure that the given dtype is an authentic dtype object rather
# than just something that can be interpreted as a dtype object.
dtype = numpy.dtype(dtype)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
raise ValueError(msg)
d = {
"descr": dtype_to_descr(dtype),
"fortran_order": fortran_order,
"shape": shape,
}
# If we got here, then it should be safe to create the file.
with open(os.fspath(filename), mode + 'b') as fp:
_write_array_header(fp, d, version)
offset = fp.tell()
else:
# Read the header of the file first.
with open(os.fspath(filename), 'rb') as fp:
version = read_magic(fp)
_check_version(version)
shape, fortran_order, dtype = _read_array_header(
fp, version, max_header_size=max_header_size)
if dtype.hasobject:
msg = "Array can't be memory-mapped: Python objects in dtype."
raise ValueError(msg)
offset = fp.tell()
if fortran_order:
order = 'F'
else:
order = 'C'
# We need to change a write-only mode to a read-write mode since we've
# already written data to the file.
if mode == 'w+':
mode = 'r+'
marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
mode=mode, offset=offset)
return marray
|
Open a .npy file as a memory-mapped array.
This may be used to read an existing file or create a new one.
Parameters
----------
filename : str or path-like
The name of the file on disk. This may *not* be a file-like
object.
mode : str, optional
The mode in which to open the file; the default is 'r+'. In
addition to the standard file modes, 'c' is also accepted to mean
"copy on write." See `memmap` for the available mode strings.
dtype : data-type, optional
The data type of the array if we are creating a new file in "write"
mode, if not, `dtype` is ignored. The default value is None, which
results in a data-type of `float64`.
shape : tuple of int
The shape of the array if we are creating a new file in "write"
mode, in which case this parameter is required. Otherwise, this
parameter is ignored and is thus optional.
fortran_order : bool, optional
Whether the array should be Fortran-contiguous (True) or
C-contiguous (False, the default) if we are creating a new file in
"write" mode.
version : tuple of int (major, minor) or None
If the mode is a "write" mode, then this is the version of the file
format used to create the file. None means use the oldest
supported version that is able to store the data. Default: None
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
See :py:func:`ast.literal_eval()` for details.
Returns
-------
marray : memmap
The memory-mapped array.
Raises
------
ValueError
If the data or the mode is invalid.
OSError
If the file is not found or cannot be opened correctly.
See Also
--------
numpy.memmap
|
python
|
numpy/lib/_format_impl.py
| 891
|
[
"filename",
"mode",
"dtype",
"shape",
"fortran_order",
"version",
"max_header_size"
] | false
| 9
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
_is_jax_zero_gradient_array
|
def _is_jax_zero_gradient_array(x: object) -> TypeGuard[_ZeroGradientArray]:
"""Return True if `x` is a zero-gradient array.
These arrays are a design quirk of Jax that may one day be removed.
See https://github.com/google/jax/issues/20620.
"""
# Fast exit
try:
dtype = x.dtype # type: ignore[attr-defined]
except AttributeError:
return False
cls = cast(Hashable, type(dtype))
if not _issubclass_fast(cls, "numpy.dtypes", "VoidDType"):
return False
if "jax" not in sys.modules:
return False
import jax
# jax.float0 is a np.dtype([('float0', 'V')])
return dtype == jax.float0
|
Return True if `x` is a zero-gradient array.
These arrays are a design quirk of Jax that may one day be removed.
See https://github.com/google/jax/issues/20620.
|
python
|
sklearn/externals/array_api_compat/common/_helpers.py
| 75
|
[
"x"
] |
TypeGuard[_ZeroGradientArray]
| true
| 3
| 6
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
read
|
@CanIgnoreReturnValue
// Sometimes you don't care how many bytes you actually read, I guess.
// (You know that it's either going to read len bytes or stop at EOF.)
public static int read(InputStream in, byte[] b, int off, int len) throws IOException {
checkNotNull(in);
checkNotNull(b);
if (len < 0) {
throw new IndexOutOfBoundsException(String.format("len (%s) cannot be negative", len));
}
checkPositionIndexes(off, off + len, b.length);
int total = 0;
while (total < len) {
int result = in.read(b, off + total, len - total);
if (result == -1) {
break;
}
total += result;
}
return total;
}
|
Reads some bytes from an input stream and stores them into the buffer array {@code b}. This
method blocks until {@code len} bytes of input data have been read into the array, or end of
file is detected. The number of bytes read is returned, possibly zero. Does not close the
stream.
<p>A caller can detect EOF if the number of bytes read is less than {@code len}. All subsequent
calls on the same stream will return zero.
<p>If {@code b} is null, a {@code NullPointerException} is thrown. If {@code off} is negative,
or {@code len} is negative, or {@code off+len} is greater than the length of the array {@code
b}, then an {@code IndexOutOfBoundsException} is thrown. If {@code len} is zero, then no bytes
are read. Otherwise, the first byte read is stored into element {@code b[off]}, the next one
into {@code b[off+1]}, and so on. The number of bytes read is, at most, equal to {@code len}.
@param in the input stream to read from
@param b the buffer into which the data is read
@param off an int specifying the offset into the data
@param len an int specifying the number of bytes to read
@return the number of bytes read
@throws IOException if an I/O error occurs
@throws IndexOutOfBoundsException if {@code off} is negative, if {@code len} is negative, or if
{@code off + len} is greater than {@code b.length}
|
java
|
android/guava/src/com/google/common/io/ByteStreams.java
| 935
|
[
"in",
"b",
"off",
"len"
] | true
| 4
| 8.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
resourceOutput
|
public Builder resourceOutput(Path resourceOutput) {
this.resourceOutput = resourceOutput;
return this;
}
|
Set the output directory for generated resources.
@param resourceOutput the location of generated resources
@return this builder for method chaining
|
java
|
spring-context/src/main/java/org/springframework/context/aot/AbstractAotProcessor.java
| 231
|
[
"resourceOutput"
] |
Builder
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
herm2poly
|
def herm2poly(c):
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herm
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import herm2poly
>>> herm2poly([ 1. , 2.75 , 0.5 , 0.375])
array([0., 1., 2., 3.])
"""
from .polynomial import polyadd, polymulx, polysub
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
c[1] *= 2
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1 * (2 * (i - 1)))
c1 = polyadd(tmp, polymulx(c1) * 2)
return polyadd(c0, polymulx(c1) * 2)
|
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herm
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import herm2poly
>>> herm2poly([ 1. , 2.75 , 0.5 , 0.375])
array([0., 1., 2., 3.])
|
python
|
numpy/polynomial/hermite.py
| 140
|
[
"c"
] | false
| 5
| 7.52
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
subscribeFromPattern
|
public synchronized boolean subscribeFromPattern(Set<String> topics) {
if (subscriptionType != SubscriptionType.AUTO_PATTERN)
throw new IllegalArgumentException("Attempt to subscribe from pattern while subscription type set to " +
subscriptionType);
return changeSubscription(topics);
}
|
This method sets the subscription type if it is not already set (i.e. when it is NONE),
or verifies that the subscription type is equal to the give type when it is set (i.e.
when it is not NONE)
@param type The given subscription type
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 210
|
[
"topics"
] | true
| 2
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_get_vars
|
def _get_vars(self, stack, scopes: list[str]) -> None:
"""
Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
"""
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, f"f_{scope}")
self.scope = DeepChainMap(self.scope.new_child(d))
finally:
# won't remove it, but DECREF it
# in Py3 this probably isn't necessary since frame won't be
# scope after the loop
del frame
|
Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
|
python
|
pandas/core/computation/scope.py
| 272
|
[
"self",
"stack",
"scopes"
] |
None
| true
| 2
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
pendingToString
|
@Override
protected @Nullable String pendingToString() {
@RetainedLocalRef ListenableFuture<? extends V> localInputFuture = inputFuture;
@RetainedLocalRef Class<X> localExceptionType = exceptionType;
@RetainedLocalRef F localFallback = fallback;
String superString = super.pendingToString();
String resultString = "";
if (localInputFuture != null) {
resultString = "inputFuture=[" + localInputFuture + "], ";
}
if (localExceptionType != null && localFallback != null) {
return resultString
+ "exceptionType=["
+ localExceptionType
+ "], fallback=["
+ localFallback
+ "]";
} else if (superString != null) {
return resultString + superString;
}
return null;
}
|
Template method for subtypes to actually set the result.
|
java
|
android/guava/src/com/google/common/util/concurrent/AbstractCatchingFuture.java
| 167
|
[] |
String
| true
| 5
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
span
|
@Override
public Range<C> span() {
Entry<Cut<C>, Range<C>> firstEntry = rangesByLowerBound.firstEntry();
Entry<Cut<C>, Range<C>> lastEntry = rangesByLowerBound.lastEntry();
if (firstEntry == null || lastEntry == null) {
/*
* Either both are null or neither is: Either the set is empty, or it's not. But we check both
* to make the nullness checker happy.
*/
throw new NoSuchElementException();
}
return Range.create(firstEntry.getValue().lowerBound, lastEntry.getValue().upperBound);
}
|
Returns a {@code TreeRangeSet} representing the union of the specified ranges.
<p>This is the smallest {@code RangeSet} which encloses each of the specified ranges. An
element will be contained in this {@code RangeSet} if and only if it is contained in at least
one {@code Range} in {@code ranges}.
@since 21.0
|
java
|
android/guava/src/com/google/common/collect/TreeRangeSet.java
| 164
|
[] | true
| 3
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
indexOf
|
public static int indexOf(final byte[] array, final byte valueToFind, final int startIndex) {
if (array == null) {
return INDEX_NOT_FOUND;
}
for (int i = max0(startIndex); i < array.length; i++) {
if (valueToFind == array[i]) {
return i;
}
}
return INDEX_NOT_FOUND;
}
|
Finds the index of the given value in the array starting at the given index.
<p>
This method returns {@link #INDEX_NOT_FOUND} ({@code -1}) for a {@code null} input array.
</p>
<p>
A negative startIndex is treated as zero. A startIndex larger than the array length will return {@link #INDEX_NOT_FOUND} ({@code -1}).
</p>
@param array the array to search for the object, may be {@code null}.
@param valueToFind the value to find.
@param startIndex the index to start searching.
@return the index of the value within the array, {@link #INDEX_NOT_FOUND} ({@code -1}) if not found or {@code null} array input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 2,427
|
[
"array",
"valueToFind",
"startIndex"
] | true
| 4
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
empty
|
static ReleasableExponentialHistogram empty() {
return EmptyExponentialHistogram.INSTANCE;
}
|
@return an empty singleton, which does not allocate any memory and therefore {@link #close()} is a no-op.
|
java
|
libs/exponential-histogram/src/main/java/org/elasticsearch/exponentialhistogram/ReleasableExponentialHistogram.java
| 34
|
[] |
ReleasableExponentialHistogram
| true
| 1
| 6.8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
isExpandoDeclaration
|
function isExpandoDeclaration(node: Declaration): boolean {
if (!isAssignmentDeclaration(node)) return false;
const containingAssignment = findAncestor(node, p => {
if (isAssignmentExpression(p)) return true;
if (!isAssignmentDeclaration(p as Declaration)) return "quit";
return false;
}) as AssignmentExpression<AssignmentOperatorToken> | undefined;
return !!containingAssignment && getAssignmentDeclarationKind(containingAssignment) === AssignmentDeclarationKind.Property;
}
|
```ts
function f() {}
f.foo = 0;
```
Here, `f` has two declarations: the function declaration, and the identifier in the next line.
The latter is a declaration for `f` because it gives `f` the `SymbolFlags.Namespace` meaning so
it can contain `foo`. However, that declaration is pretty uninteresting and not intuitively a
"definition" for `f`. Ideally, the question we'd like to answer is "what SymbolFlags does this
declaration contribute to the symbol for `f`?" If the answer is just `Namespace` and the
declaration looks like an assignment, that declaration is in no sense a definition for `f`.
But that information is totally lost during binding and/or symbol merging, so we need to do
our best to reconstruct it or use other heuristics. This function (and the logic around its
calling) covers our tests but feels like a hack, and it would be great if someone could come
up with a more precise definition of what counts as a definition.
|
typescript
|
src/services/goToDefinition.ts
| 617
|
[
"node"
] | true
| 5
| 7.12
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
reconstruct_func
|
def reconstruct_func(
func: AggFuncType | None, **kwargs
) -> tuple[bool, AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]:
"""
This is the internal function to reconstruct func given if there is relabeling
or not and also normalize the keyword to get new order of columns.
If named aggregation is applied, `func` will be None, and kwargs contains the
column and aggregation function information to be parsed;
If named aggregation is not applied, `func` is either string (e.g. 'min') or
Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})
If relabeling is True, will return relabeling, reconstructed func, column
names, and the reconstructed order of columns.
If relabeling is False, the columns and order will be None.
Parameters
----------
func: agg function (e.g. 'min' or Callable) or list of agg functions
(e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
**kwargs: dict, kwargs used in is_multi_agg_with_relabel and
normalize_keyword_aggregation function for relabelling
Returns
-------
relabelling: bool, if there is relabelling or not
func: normalized and mangled func
columns: tuple of column names
order: array of columns indices
Examples
--------
>>> reconstruct_func(None, **{"foo": ("col", "min")})
(True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
>>> reconstruct_func("min")
(False, 'min', None, None)
"""
from pandas.core.groupby.generic import NamedAgg
relabeling = func is None and (
is_multi_agg_with_relabel(**kwargs)
or any(isinstance(v, NamedAgg) for v in kwargs.values())
)
columns: tuple[str, ...] | None = None
order: npt.NDArray[np.intp] | None = None
if not relabeling:
if isinstance(func, list) and len(func) > len(set(func)):
# GH 28426 will raise error if duplicated function names are used and
# there is no reassigned name
raise SpecificationError(
"Function names must be unique if there is no new column names assigned"
)
if func is None:
# nicer error message
raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
if relabeling:
# error: Incompatible types in assignment (expression has type
# "MutableMapping[Hashable, list[Callable[..., Any] | str]]", variable has type
# "Callable[..., Any] | str | list[Callable[..., Any] | str] |
# MutableMapping[Hashable, Callable[..., Any] | str | list[Callable[..., Any] |
# str]] | None")
converted_kwargs = {}
for key, val in kwargs.items():
if isinstance(val, NamedAgg):
aggfunc = val.aggfunc
if val.args or val.kwargs:
aggfunc = lambda x, func=aggfunc, a=val.args, kw=val.kwargs: func(
x, *a, **kw
)
converted_kwargs[key] = (val.column, aggfunc)
else:
converted_kwargs[key] = val
func, columns, order = normalize_keyword_aggregation( # type: ignore[assignment]
converted_kwargs
)
assert func is not None
return relabeling, func, columns, order
|
This is the internal function to reconstruct func given if there is relabeling
or not and also normalize the keyword to get new order of columns.
If named aggregation is applied, `func` will be None, and kwargs contains the
column and aggregation function information to be parsed;
If named aggregation is not applied, `func` is either string (e.g. 'min') or
Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})
If relabeling is True, will return relabeling, reconstructed func, column
names, and the reconstructed order of columns.
If relabeling is False, the columns and order will be None.
Parameters
----------
func: agg function (e.g. 'min' or Callable) or list of agg functions
(e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
**kwargs: dict, kwargs used in is_multi_agg_with_relabel and
normalize_keyword_aggregation function for relabelling
Returns
-------
relabelling: bool, if there is relabelling or not
func: normalized and mangled func
columns: tuple of column names
order: array of columns indices
Examples
--------
>>> reconstruct_func(None, **{"foo": ("col", "min")})
(True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
>>> reconstruct_func("min")
(False, 'min', None, None)
|
python
|
pandas/core/apply.py
| 1,712
|
[
"func"
] |
tuple[bool, AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]
| true
| 13
| 8.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
serializeEntityNameAsExpressionFallback
|
function serializeEntityNameAsExpressionFallback(node: EntityName): BinaryExpression {
if (node.kind === SyntaxKind.Identifier) {
// A -> typeof A !== "undefined" && A
const copied = serializeEntityNameAsExpression(node);
return createCheckedValue(copied, copied);
}
if (node.left.kind === SyntaxKind.Identifier) {
// A.B -> typeof A !== "undefined" && A.B
return createCheckedValue(serializeEntityNameAsExpression(node.left), serializeEntityNameAsExpression(node));
}
// A.B.C -> typeof A !== "undefined" && (_a = A.B) !== void 0 && _a.C
const left = serializeEntityNameAsExpressionFallback(node.left);
const temp = factory.createTempVariable(hoistVariableDeclaration);
return factory.createLogicalAnd(
factory.createLogicalAnd(
left.left,
factory.createStrictInequality(factory.createAssignment(temp, left.right), factory.createVoidZero()),
),
factory.createPropertyAccessExpression(temp, node.right),
);
}
|
Serializes an entity name which may not exist at runtime, but whose access shouldn't throw
@param node The entity name to serialize.
|
typescript
|
src/compiler/transformers/typeSerializer.ts
| 569
|
[
"node"
] | true
| 3
| 7.04
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
get_pip_package_name
|
def get_pip_package_name(provider_id: str) -> str:
"""
Returns PIP package name for the package id.
:param provider_id: id of the package
:return: the name of pip package
"""
return "apache-airflow-providers-" + provider_id.replace(".", "-")
|
Returns PIP package name for the package id.
:param provider_id: id of the package
:return: the name of pip package
|
python
|
dev/breeze/src/airflow_breeze/utils/packages.py
| 457
|
[
"provider_id"
] |
str
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
sheet_names
|
def sheet_names(self):
"""
Names of the sheets in the document.
This is particularly useful for loading a specific sheet into a DataFrame when
you do not know the sheet names beforehand.
Returns
-------
list of str
List of sheet names in the document.
See Also
--------
ExcelFile.parse : Parse a sheet into a DataFrame.
read_excel : Read an Excel file into a pandas DataFrame. If you know the sheet
names, it may be easier to specify them directly to read_excel.
Examples
--------
>>> file = pd.ExcelFile("myfile.xlsx") # doctest: +SKIP
>>> file.sheet_names # doctest: +SKIP
["Sheet1", "Sheet2"]
"""
return self._reader.sheet_names
|
Names of the sheets in the document.
This is particularly useful for loading a specific sheet into a DataFrame when
you do not know the sheet names beforehand.
Returns
-------
list of str
List of sheet names in the document.
See Also
--------
ExcelFile.parse : Parse a sheet into a DataFrame.
read_excel : Read an Excel file into a pandas DataFrame. If you know the sheet
names, it may be easier to specify them directly to read_excel.
Examples
--------
>>> file = pd.ExcelFile("myfile.xlsx") # doctest: +SKIP
>>> file.sheet_names # doctest: +SKIP
["Sheet1", "Sheet2"]
|
python
|
pandas/io/excel/_base.py
| 1,832
|
[
"self"
] | false
| 1
| 6.48
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
|
isBindingIdentifier
|
function isBindingIdentifier(): boolean {
if (token() === SyntaxKind.Identifier) {
return true;
}
// `let await`/`let yield` in [Yield] or [Await] are allowed here and disallowed in the binder.
return token() > SyntaxKind.LastReservedWord;
}
|
Invokes the provided callback. If the callback returns something falsy, then it restores
the parser to the state it was in immediately prior to invoking the callback. If the
callback returns something truthy, then the parser state is not rolled back. The result
of invoking the callback is returned from this function.
|
typescript
|
src/compiler/parser.ts
| 2,308
|
[] | true
| 2
| 7.2
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
iterator
|
@Override
public Iterator<Entry> iterator() {
return new PropertiesIterator(this.entries);
}
|
Return the value of the specified property as an {@link Instant} or {@code null} if
the value is not a valid {@link Long} representation of an epoch time.
@param key the key of the property
@return the property value
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/info/InfoProperties.java
| 78
|
[] | true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
baseProperty
|
function baseProperty(key) {
return function(object) {
return object == null ? undefined : object[key];
};
}
|
The base implementation of `_.property` without support for deep paths.
@private
@param {string} key The key of the property to get.
@returns {Function} Returns the new accessor function.
|
javascript
|
lodash.js
| 892
|
[
"key"
] | false
| 2
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
wsgi_errors_stream
|
def wsgi_errors_stream() -> t.TextIO:
"""Find the most appropriate error stream for the application. If a request
is active, log to ``wsgi.errors``, otherwise use ``sys.stderr``.
If you configure your own :class:`logging.StreamHandler`, you may want to
use this for the stream. If you are using file or dict configuration and
can't import this directly, you can refer to it as
``ext://flask.logging.wsgi_errors_stream``.
"""
if request:
return request.environ["wsgi.errors"] # type: ignore[no-any-return]
return sys.stderr
|
Find the most appropriate error stream for the application. If a request
is active, log to ``wsgi.errors``, otherwise use ``sys.stderr``.
If you configure your own :class:`logging.StreamHandler`, you may want to
use this for the stream. If you are using file or dict configuration and
can't import this directly, you can refer to it as
``ext://flask.logging.wsgi_errors_stream``.
|
python
|
src/flask/logging.py
| 16
|
[] |
t.TextIO
| true
| 2
| 6.88
|
pallets/flask
| 70,946
|
unknown
| false
|
join
|
@Nullable String join(String prefix, String name);
|
Joins the given prefix and name.
@param prefix the prefix
@param name the name
@return the joined result or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/ContextPairs.java
| 113
|
[
"prefix",
"name"
] |
String
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
matchesMethod
|
private boolean matchesMethod(Method method) {
return (this.checkInherited ? AnnotatedElementUtils.hasAnnotation(method, this.annotationType) :
method.isAnnotationPresent(this.annotationType));
}
|
Create a new AnnotationClassFilter for the given annotation type.
@param annotationType the annotation type to look for
@param checkInherited whether to also check the superclasses and
interfaces as well as meta-annotations for the annotation type
(i.e. whether to use {@link AnnotatedElementUtils#hasAnnotation}
semantics instead of standard Java {@link Method#isAnnotationPresent})
@since 5.0
|
java
|
spring-aop/src/main/java/org/springframework/aop/support/annotation/AnnotationMethodMatcher.java
| 86
|
[
"method"
] | true
| 2
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
ping
|
def ping(self, destination=None):
"""Ping all (or specific) workers.
>>> app.control.inspect().ping()
{'celery@node1': {'ok': 'pong'}, 'celery@node2': {'ok': 'pong'}}
>>> app.control.inspect().ping(destination=['celery@node1'])
{'celery@node1': {'ok': 'pong'}}
Arguments:
destination (List): If set, a list of the hosts to send the
command to, when empty broadcast to all workers.
Returns:
Dict: Dictionary ``{HOSTNAME: {'ok': 'pong'}}``.
See Also:
:meth:`broadcast` for supported keyword arguments.
"""
if destination:
self.destination = destination
return self._request('ping')
|
Ping all (or specific) workers.
>>> app.control.inspect().ping()
{'celery@node1': {'ok': 'pong'}, 'celery@node2': {'ok': 'pong'}}
>>> app.control.inspect().ping(destination=['celery@node1'])
{'celery@node1': {'ok': 'pong'}}
Arguments:
destination (List): If set, a list of the hosts to send the
command to, when empty broadcast to all workers.
Returns:
Dict: Dictionary ``{HOSTNAME: {'ok': 'pong'}}``.
See Also:
:meth:`broadcast` for supported keyword arguments.
|
python
|
celery/app/control.py
| 274
|
[
"self",
"destination"
] | false
| 2
| 8.24
|
celery/celery
| 27,741
|
google
| false
|
|
_has_method
|
def _has_method(
class_path: str,
method_names: Iterable[str],
class_registry: dict[str, dict[str, Any]],
ignored_classes: list[str] | None = None,
) -> bool:
"""
Determines if a class or its bases in the registry have any of the specified methods.
:param class_path: The path of the class to check.
:param method_names: A list of names of methods to search for.
:param class_registry: A dictionary representing the class registry, where each key is a class name
and the value is its metadata.
:param ignored_classes: A list of classes to ignore when searching. If a base class has
OL method but is ignored, the class_path will be treated as it would not have ol methods.
:return: True if any of the specified methods are found in the class or its base classes; False otherwise.
Example:
>>> example_class_registry = {
... "some.module.MyClass": {"methods": {"foo", "bar"}, "base_classes": ["BaseClass"]},
... "another.module.BaseClass": {"methods": {"base_foo"}, "base_classes": []},
... }
>>> _has_method("some.module.MyClass", ["foo"], example_class_registry)
True
>>> _has_method("some.module.MyClass", ["base_foo"], example_class_registry)
True
>>> _has_method("some.module.MyClass", ["not_a_method"], example_class_registry)
False
"""
ignored_classes = ignored_classes or []
if class_path in ignored_classes:
return False
if class_path in class_registry:
if any(method in class_registry[class_path]["methods"] for method in method_names):
return True
for base_name in class_registry[class_path]["base_classes"]:
if base_name in ignored_classes:
continue
if _has_method(base_name, method_names, class_registry, ignored_classes):
return True
return False
|
Determines if a class or its bases in the registry have any of the specified methods.
:param class_path: The path of the class to check.
:param method_names: A list of names of methods to search for.
:param class_registry: A dictionary representing the class registry, where each key is a class name
and the value is its metadata.
:param ignored_classes: A list of classes to ignore when searching. If a base class has
OL method but is ignored, the class_path will be treated as it would not have ol methods.
:return: True if any of the specified methods are found in the class or its base classes; False otherwise.
Example:
>>> example_class_registry = {
... "some.module.MyClass": {"methods": {"foo", "bar"}, "base_classes": ["BaseClass"]},
... "another.module.BaseClass": {"methods": {"base_foo"}, "base_classes": []},
... }
>>> _has_method("some.module.MyClass", ["foo"], example_class_registry)
True
>>> _has_method("some.module.MyClass", ["base_foo"], example_class_registry)
True
>>> _has_method("some.module.MyClass", ["not_a_method"], example_class_registry)
False
|
python
|
devel-common/src/sphinx_exts/providers_extensions.py
| 188
|
[
"class_path",
"method_names",
"class_registry",
"ignored_classes"
] |
bool
| true
| 8
| 9.2
|
apache/airflow
| 43,597
|
sphinx
| false
|
count_masked
|
def count_masked(arr, axis=None):
"""
Count the number of masked elements along the given axis.
Parameters
----------
arr : array_like
An array with (possibly) masked elements.
axis : int, optional
Axis along which to count. If None (default), a flattened
version of the array is used.
Returns
-------
count : int, ndarray
The total number of masked elements (axis=None) or the number
of masked elements along each slice of the given axis.
See Also
--------
MaskedArray.count : Count non-masked elements.
Examples
--------
>>> import numpy as np
>>> a = np.arange(9).reshape((3,3))
>>> a = np.ma.array(a)
>>> a[1, 0] = np.ma.masked
>>> a[1, 2] = np.ma.masked
>>> a[2, 1] = np.ma.masked
>>> a
masked_array(
data=[[0, 1, 2],
[--, 4, --],
[6, --, 8]],
mask=[[False, False, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> np.ma.count_masked(a)
3
When the `axis` keyword is used an array is returned.
>>> np.ma.count_masked(a, axis=0)
array([1, 1, 1])
>>> np.ma.count_masked(a, axis=1)
array([0, 2, 1])
"""
m = getmaskarray(arr)
return m.sum(axis)
|
Count the number of masked elements along the given axis.
Parameters
----------
arr : array_like
An array with (possibly) masked elements.
axis : int, optional
Axis along which to count. If None (default), a flattened
version of the array is used.
Returns
-------
count : int, ndarray
The total number of masked elements (axis=None) or the number
of masked elements along each slice of the given axis.
See Also
--------
MaskedArray.count : Count non-masked elements.
Examples
--------
>>> import numpy as np
>>> a = np.arange(9).reshape((3,3))
>>> a = np.ma.array(a)
>>> a[1, 0] = np.ma.masked
>>> a[1, 2] = np.ma.masked
>>> a[2, 1] = np.ma.masked
>>> a
masked_array(
data=[[0, 1, 2],
[--, 4, --],
[6, --, 8]],
mask=[[False, False, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> np.ma.count_masked(a)
3
When the `axis` keyword is used an array is returned.
>>> np.ma.count_masked(a, axis=0)
array([1, 1, 1])
>>> np.ma.count_masked(a, axis=1)
array([0, 2, 1])
|
python
|
numpy/ma/extras.py
| 66
|
[
"arr",
"axis"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
format
|
@Deprecated
StringBuffer format(Date date, StringBuffer buf);
|
Formats a {@link Date} object into the
supplied {@link StringBuffer} using a {@link GregorianCalendar}.
@param date the date to format.
@param buf the buffer to format into.
@return the specified string buffer.
@deprecated Use {{@link #format(Date, Appendable)}.
|
java
|
src/main/java/org/apache/commons/lang3/time/DatePrinter.java
| 106
|
[
"date",
"buf"
] |
StringBuffer
| true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isAppEngineWithApiClasses
|
@J2ktIncompatible
@GwtIncompatible // TODO
private static boolean isAppEngineWithApiClasses() {
if (System.getProperty("com.google.appengine.runtime.environment") == null) {
return false;
}
try {
Class.forName("com.google.appengine.api.utils.SystemProperty");
} catch (ClassNotFoundException e) {
return false;
}
try {
// If the current environment is null, we're not inside AppEngine.
return Class.forName("com.google.apphosting.api.ApiProxy")
.getMethod("getCurrentEnvironment")
.invoke(null)
!= null;
} catch (ClassNotFoundException e) {
// If ApiProxy doesn't exist, we're not on AppEngine at all.
return false;
} catch (InvocationTargetException e) {
// If ApiProxy throws an exception, we're not in a proper AppEngine environment.
return false;
} catch (IllegalAccessException e) {
// If the method isn't accessible, we're not on a supported version of AppEngine;
return false;
} catch (NoSuchMethodException e) {
// If the method doesn't exist, we're not on a supported version of AppEngine;
return false;
}
}
|
Returns a default thread factory used to create new threads.
<p>When running on AppEngine with access to <a
href="https://cloud.google.com/appengine/docs/standard/java/javadoc/">AppEngine legacy
APIs</a>, this method returns {@code ThreadManager.currentRequestThreadFactory()}. Otherwise,
it returns {@link Executors#defaultThreadFactory()}.
@since 14.0
|
java
|
android/guava/src/com/google/common/util/concurrent/MoreExecutors.java
| 834
|
[] | true
| 7
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
toLocalDateTime
|
public LocalDateTime toLocalDateTime() {
return toLocalDateTime(calendar);
}
|
Converts this instance to a {@link LocalDateTime}.
@return a LocalDateTime.
@since 3.17.0
|
java
|
src/main/java/org/apache/commons/lang3/time/CalendarUtils.java
| 213
|
[] |
LocalDateTime
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.