| Column | Type | Range / distinct values |
|---|---|---|
| function_name | string | length 1–57 |
| function_code | string | length 20–4.99k |
| documentation | string | length 50–2k |
| language | string | 5 classes |
| file_path | string | length 8–166 |
| line_number | int32 | 4–16.7k |
| parameters | list | length 0–20 |
| return_type | string | length 0–131 |
| has_type_hints | bool | 2 classes |
| complexity | int32 | 1–51 |
| quality_score | float32 | 6–9.68 |
| repo_name | string | 34 classes |
| repo_stars | int32 | 2.9k–242k |
| docstring_style | string | 7 classes |
| is_async | bool | 2 classes |
preLookup
|
boolean preLookup() {
return currentUsages.updateAndGet(current -> current < 0 ? current : current + 1) > 0;
}
|
Prepares the database for lookup by incrementing the usage count.
If the usage count is already negative, it indicates that the database is being closed,
and this method will return false to indicate that no lookup should be performed.
@return true if the database is ready for lookup, false if it is being closed
|
java
|
modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java
| 101
|
[] | true
| 2
| 8
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
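A minimal Python sketch of the same acquire-or-refuse pattern used by `preLookup` (a usage counter that goes negative once close begins); the class and method names here are illustrative, not part of the Elasticsearch source:

```python
import threading

class RefGuard:
    """Usage counter that refuses new acquisitions once closing has begun."""

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._usages = 0  # a negative count marks the resource as closing

    def pre_lookup(self) -> bool:
        # Mirror of the Java updateAndGet lambda: increment only while non-negative.
        with self._lock:
            if self._usages < 0:
                return False  # closing; no lookup should be performed
            self._usages += 1
            return True

guard = RefGuard()
print(guard.pre_lookup())  # True: the guard is open and now holds one usage
```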
to_pytimedelta
|
def to_pytimedelta(self) -> np.ndarray:
"""
Return an array of native :class:`datetime.timedelta` objects.
Python's standard `datetime` library uses a different representation
timedelta's. This method converts a Series of pandas Timedeltas
to `datetime.timedelta` format with the same length as the original
Series.
Returns
-------
numpy.ndarray
Array of 1D containing data with `datetime.timedelta` type.
See Also
--------
datetime.timedelta : A duration expressing the difference
between two date, time, or datetime.
Examples
--------
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="D"))
>>> s
0 0 days
1 1 days
2 2 days
3 3 days
4 4 days
dtype: timedelta64[ns]
>>> s.dt.to_pytimedelta()
array([datetime.timedelta(0), datetime.timedelta(days=1),
datetime.timedelta(days=2), datetime.timedelta(days=3),
datetime.timedelta(days=4)], dtype=object)
"""
# GH 57463
warnings.warn(
f"The behavior of {type(self).__name__}.to_pytimedelta is deprecated, "
"in a future version this will return a Series containing python "
"datetime.timedelta objects instead of an ndarray. To retain the "
"old behavior, call `np.array` on the result",
Pandas4Warning,
stacklevel=find_stack_level(),
)
return self._get_values().to_pytimedelta()
|
Return an array of native :class:`datetime.timedelta` objects.
Python's standard `datetime` library uses a different representation
timedelta's. This method converts a Series of pandas Timedeltas
to `datetime.timedelta` format with the same length as the original
Series.
Returns
-------
numpy.ndarray
Array of 1D containing data with `datetime.timedelta` type.
See Also
--------
datetime.timedelta : A duration expressing the difference
between two date, time, or datetime.
Examples
--------
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit="D"))
>>> s
0 0 days
1 1 days
2 2 days
3 3 days
4 4 days
dtype: timedelta64[ns]
>>> s.dt.to_pytimedelta()
array([datetime.timedelta(0), datetime.timedelta(days=1),
datetime.timedelta(days=2), datetime.timedelta(days=3),
datetime.timedelta(days=4)], dtype=object)
|
python
|
pandas/core/indexes/accessors.py
| 464
|
[
"self"
] |
np.ndarray
| true
| 1
| 6.96
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
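The deprecation message above suggests wrapping the result in `np.array` to keep ndarray behavior across pandas versions; a minimal sketch of that workaround (assumes pandas and numpy are installed):

```python
import warnings

import numpy as np
import pandas as pd

s = pd.Series(pd.to_timedelta(np.arange(3), unit="D"))

with warnings.catch_warnings():
    warnings.simplefilter("ignore")  # the call currently emits a deprecation warning
    result = s.dt.to_pytimedelta()

# Per the warning text, np.array(...) yields an ndarray whether the call
# returns an ndarray (today) or a Series (planned future behavior).
arr = np.array(result)
print(arr.dtype)  # object
```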
create_github_issue_url
|
def create_github_issue_url(title: str, body: str, labels: Iterable[str]) -> str:
"""
Creates URL to create the issue with title, body and labels.
:param title: issue title
:param body: issue body
:param labels: labels for the issue
:return: URL to use to create the issue
"""
from urllib.parse import quote
quoted_labels = quote(",".join(labels))
quoted_title = quote(title)
quoted_body = quote(body)
return (
f"https://github.com/apache/airflow/issues/new?labels={quoted_labels}&"
f"title={quoted_title}&body={quoted_body}"
)
|
Creates URL to create the issue with title, body and labels.
:param title: issue title
:param body: issue body
:param labels: labels for the issue
:return: URL to use to create the issue
|
python
|
dev/breeze/src/airflow_breeze/commands/release_management_commands.py
| 2,425
|
[
"title",
"body",
"labels"
] |
str
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
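Since the helper is pure string formatting, its behavior is easy to check standalone; this sketch inlines the same logic rather than importing airflow_breeze, and the issue title and labels are made up:

```python
from urllib.parse import quote

def create_github_issue_url(title: str, body: str, labels) -> str:
    # Same logic as the Breeze helper: percent-encode each URL component.
    quoted_labels = quote(",".join(labels))
    return (
        f"https://github.com/apache/airflow/issues/new?labels={quoted_labels}&"
        f"title={quote(title)}&body={quote(body)}"
    )

url = create_github_issue_url("Flaky test", "Details here", ["kind:bug", "area:ci"])
print(url)
# https://github.com/apache/airflow/issues/new?labels=kind%3Abug%2Carea%3Aci&title=Flaky%20test&body=Details%20here
```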
getAllSources
|
public Set<Object> getAllSources() {
Set<Object> allSources = new LinkedHashSet<>();
if (!CollectionUtils.isEmpty(this.primarySources)) {
allSources.addAll(this.primarySources);
}
if (!CollectionUtils.isEmpty(this.properties.getSources())) {
allSources.addAll(this.properties.getSources());
}
return Collections.unmodifiableSet(allSources);
}
|
Return an immutable set of all the sources that will be added to an
ApplicationContext when {@link #run(String...)} is called. This method combines any
primary sources specified in the constructor with any additional ones that have
been {@link #setSources(Set) explicitly set}.
@return an immutable set of all sources
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 1,189
|
[] | true
| 3
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
classOutput
|
public Builder classOutput(Path classOutput) {
this.classOutput = classOutput;
return this;
}
|
Set the output directory for generated classes.
@param classOutput the location of generated classes
@return this builder for method chaining
|
java
|
spring-context/src/main/java/org/springframework/context/aot/AbstractAotProcessor.java
| 241
|
[
"classOutput"
] |
Builder
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fast_logdet
|
def fast_logdet(A):
"""Compute logarithm of determinant of a square matrix.
The (natural) logarithm of the determinant of a square matrix
is returned if det(A) is non-negative and well defined.
If the determinant is zero or negative returns -Inf.
Equivalent to : np.log(np.det(A)) but more robust.
Parameters
----------
A : array_like of shape (n, n)
The square matrix.
Returns
-------
logdet : float
When det(A) is strictly positive, log(det(A)) is returned.
When det(A) is non-positive or not defined, then -inf is returned.
See Also
--------
numpy.linalg.slogdet : Compute the sign and (natural) logarithm of the determinant
of an array.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import fast_logdet
>>> a = np.array([[5, 1], [2, 8]])
>>> fast_logdet(a)
np.float64(3.6375861597263857)
"""
xp, _ = get_namespace(A)
sign, ld = xp.linalg.slogdet(A)
if not sign > 0:
return -xp.inf
return ld
|
Compute logarithm of determinant of a square matrix.
The (natural) logarithm of the determinant of a square matrix
is returned if det(A) is non-negative and well defined.
If the determinant is zero or negative returns -Inf.
Equivalent to : np.log(np.det(A)) but more robust.
Parameters
----------
A : array_like of shape (n, n)
The square matrix.
Returns
-------
logdet : float
When det(A) is strictly positive, log(det(A)) is returned.
When det(A) is non-positive or not defined, then -inf is returned.
See Also
--------
numpy.linalg.slogdet : Compute the sign and (natural) logarithm of the determinant
of an array.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import fast_logdet
>>> a = np.array([[5, 1], [2, 8]])
>>> fast_logdet(a)
np.float64(3.6375861597263857)
|
python
|
sklearn/utils/extmath.py
| 98
|
[
"A"
] | false
| 2
| 7.52
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
createDefaultPropertyValue
|
private PropertyValue createDefaultPropertyValue(PropertyTokenHolder tokens) {
TypeDescriptor desc = getPropertyTypeDescriptor(tokens.canonicalName);
if (desc == null) {
throw new NullValueInNestedPathException(getRootClass(), this.nestedPath + tokens.canonicalName,
"Could not determine property type for auto-growing a default value");
}
Object defaultValue = newValue(desc.getType(), desc, tokens.canonicalName);
return new PropertyValue(tokens.canonicalName, defaultValue);
}
|
Retrieve a Property accessor for the given nested property.
Create a new one if not found in the cache.
<p>Note: Caching nested PropertyAccessors is necessary now,
to keep registered custom editors for nested properties.
@param nestedProperty property to create the PropertyAccessor for
@return the PropertyAccessor instance, either cached or newly created
|
java
|
spring-beans/src/main/java/org/springframework/beans/AbstractNestablePropertyAccessor.java
| 885
|
[
"tokens"
] |
PropertyValue
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
detectAndParse
|
public static Period detectAndParse(String value) {
return detectAndParse(value, null);
}
|
Detect the style then parse the value to return a period.
@param value the value to parse
@return the parsed period
@throws IllegalArgumentException if the value is not a known style or cannot be
parsed
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/convert/PeriodStyle.java
| 185
|
[
"value"
] |
Period
| true
| 1
| 6.64
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
delete_cluster
|
def delete_cluster(
self,
cluster_identifier: str,
skip_final_cluster_snapshot: bool = True,
final_cluster_snapshot_identifier: str | None = None,
):
"""
Delete a cluster and optionally create a snapshot.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.delete_cluster`
:param cluster_identifier: unique identifier of a cluster
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:param final_cluster_snapshot_identifier: name of final cluster snapshot
"""
final_cluster_snapshot_identifier = final_cluster_snapshot_identifier or ""
response = self.conn.delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier,
)
return response["Cluster"] if response["Cluster"] else None
|
Delete a cluster and optionally create a snapshot.
.. seealso::
- :external+boto3:py:meth:`Redshift.Client.delete_cluster`
:param cluster_identifier: unique identifier of a cluster
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:param final_cluster_snapshot_identifier: name of final cluster snapshot
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/redshift_cluster.py
| 99
|
[
"self",
"cluster_identifier",
"skip_final_cluster_snapshot",
"final_cluster_snapshot_identifier"
] | true
| 3
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
maybeResolveSequences
|
synchronized void maybeResolveSequences() {
for (Iterator<TopicPartition> iter = partitionsWithUnresolvedSequences.keySet().iterator(); iter.hasNext(); ) {
TopicPartition topicPartition = iter.next();
if (!hasInflightBatches(topicPartition)) {
// The partition has been fully drained. At this point, the last ack'd sequence should be one less than
// next sequence destined for the partition. If so, the partition is fully resolved. If not, we should
// reset the sequence number if necessary.
if (isNextSequence(topicPartition, sequenceNumber(topicPartition))) {
// This would happen when a batch was expired, but subsequent batches succeeded.
iter.remove();
} else {
// We would enter this branch if all in flight batches were ultimately expired in the producer.
if (isTransactional()) {
// For the transactional producer, we bump the epoch if possible, otherwise we transition to a fatal error
String unackedMessagesErr = "The client hasn't received acknowledgment for some previously " +
"sent messages and can no longer retry them. ";
KafkaException abortableException = new KafkaException(unackedMessagesErr + "It is safe to abort " +
"the transaction and continue.");
KafkaException fatalException = new KafkaException(unackedMessagesErr + "It isn't safe to continue.");
transitionToAbortableErrorOrFatalError(abortableException, fatalException);
} else {
// For the idempotent producer, bump the epoch
log.info("No inflight batches remaining for {}, last ack'd sequence for partition is {}, next sequence is {}. " +
"Going to bump epoch and reset sequence numbers.", topicPartition,
lastAckedSequence(topicPartition).orElse(TxnPartitionEntry.NO_LAST_ACKED_SEQUENCE_NUMBER), sequenceNumber(topicPartition));
requestIdempotentEpochBumpForPartition(topicPartition);
}
iter.remove();
}
}
}
}
|
Returns the first inflight sequence for a given partition. This is the base sequence of an inflight batch with
the lowest sequence number.
@return the lowest inflight sequence if the transaction manager is tracking inflight requests for this partition.
If there are no inflight requests being tracked for this partition, this method will return
RecordBatch.NO_SEQUENCE.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java
| 852
|
[] |
void
| true
| 5
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
execute
|
def execute(self, context: Context):
"""
Invoke the target AWS Lambda function from Airflow.
:return: The response payload from the function, or an error object.
"""
success_status_codes = [200, 202, 204]
self.log.info("Invoking AWS Lambda function: %s with payload: %s", self.function_name, self.payload)
response = self.hook.invoke_lambda(
function_name=self.function_name,
invocation_type=self.invocation_type,
log_type=self.log_type,
client_context=self.client_context,
payload=self.payload,
qualifier=self.qualifier,
)
self.log.info("Lambda response metadata: %r", response.get("ResponseMetadata"))
if log_result := response.get("LogResult"):
log_records = self.hook.encode_log_result(
log_result,
keep_empty_lines=self.keep_empty_log_lines,
)
if log_records:
self.log.info(
"The last 4 KB of the Lambda execution log (keep_empty_log_lines=%s).",
self.keep_empty_log_lines,
)
for log_record in log_records:
self.log.info(log_record)
if response.get("StatusCode") not in success_status_codes:
raise ValueError("Lambda function did not execute", json.dumps(response.get("ResponseMetadata")))
payload_stream = response.get("Payload")
payload = payload_stream.read().decode()
if "FunctionError" in response:
raise ValueError(
"Lambda function execution resulted in error",
{"ResponseMetadata": response.get("ResponseMetadata"), "Payload": payload},
)
self.log.info("Lambda function invocation succeeded: %r", response.get("ResponseMetadata"))
return payload
|
Invoke the target AWS Lambda function from Airflow.
:return: The response payload from the function, or an error object.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/operators/lambda_function.py
| 211
|
[
"self",
"context"
] | true
| 6
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
|
watchMissingFileSystemEntry
|
function watchMissingFileSystemEntry(): FileWatcher {
return watchFile(
fileOrDirectory,
(_fileName, eventKind, modifiedTime) => {
if (eventKind === FileWatcherEventKind.Created) {
modifiedTime ||= getModifiedTime(fileOrDirectory) || missingFileModifiedTime;
if (modifiedTime !== missingFileModifiedTime) {
callback("rename", "", modifiedTime);
// Call the callback for current file or directory
// For now it could be callback for the inner directory creation,
// but just return current directory, better than current no-op
updateWatcher(watchPresentFileSystemEntry);
}
}
},
fallbackPollingInterval,
fallbackOptions,
);
}
|
Watch the file or directory that is missing
and switch to existing file or directory when the missing filesystem entry is created
|
typescript
|
src/compiler/sys.ts
| 1,339
|
[] | true
| 4
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
remove
|
public static double[] remove(final double[] array, final int index) {
return (double[]) remove((Object) array, index);
}
|
Removes the element at the specified position from the specified array. All subsequent elements are shifted to the left (subtracts one from their
indices).
<p>
This method returns a new array with the same elements of the input array except the element on the specified position. The component type of the
returned array is always the same as that of the input array.
</p>
<p>
If the input array is {@code null}, an IndexOutOfBoundsException will be thrown, because in that case no valid index can be specified.
</p>
<pre>
ArrayUtils.remove([1.1], 0) = []
ArrayUtils.remove([2.5, 6.0], 0) = [6.0]
ArrayUtils.remove([2.5, 6.0], 1) = [2.5]
ArrayUtils.remove([2.5, 6.0, 3.8], 1) = [2.5, 3.8]
</pre>
@param array the array to remove the element from, may not be {@code null}.
@param index the position of the element to be removed.
@return A new array containing the existing elements except the element at the specified position.
@throws IndexOutOfBoundsException if the index is out of range (index < 0 || index >= array.length), or if the array is {@code null}.
@since 2.1
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 4,767
|
[
"array",
"index"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
detect
|
public static DurationFormat.Style detect(String value) {
Assert.notNull(value, "Value must not be null");
// warning: the order of parsing starts to matter if multiple patterns accept a plain integer (no unit suffix)
if (ISO_8601_PATTERN.matcher(value).matches()) {
return DurationFormat.Style.ISO8601;
}
if (SIMPLE_PATTERN.matcher(value).matches()) {
return DurationFormat.Style.SIMPLE;
}
if (COMPOSITE_PATTERN.matcher(value).matches()) {
return DurationFormat.Style.COMPOSITE;
}
throw new IllegalArgumentException("'" + value + "' is not a valid duration, cannot detect any known style");
}
|
Detect the style from the given source value.
@param value the source value
@return the duration style
@throws IllegalArgumentException if the value is not a known style
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DurationFormatterUtils.java
| 111
|
[
"value"
] | true
| 4
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
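The inline comment about parse order generalizes: when more than one pattern could accept a bare integer, the first regex tried wins. A hypothetical Python sketch of the same detection shape (the patterns are simplified stand-ins, not Spring's actual regexes):

```python
import re

# Simplified stand-in patterns; order matters when more than one pattern
# could accept a plain integer with no unit suffix.
STYLES = [
    ("ISO8601", re.compile(r"[+-]?P.*", re.IGNORECASE)),
    ("SIMPLE", re.compile(r"[+-]?\d+(ns|us|ms|s|m|h|d)?", re.IGNORECASE)),
    ("COMPOSITE", re.compile(r"[+-]?(\d+[a-z]{1,2})+", re.IGNORECASE)),
]

def detect(value: str) -> str:
    for name, pattern in STYLES:
        if pattern.fullmatch(value):
            return name
    raise ValueError(f"{value!r} is not a valid duration, cannot detect any known style")

print(detect("PT10S"))  # ISO8601
print(detect("10s"))    # SIMPLE
print(detect("1h30m"))  # COMPOSITE
```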
processInjection
|
public void processInjection(Object bean) throws BeanCreationException {
Class<?> clazz = bean.getClass();
InjectionMetadata metadata = findAutowiringMetadata(clazz.getName(), clazz, null);
try {
metadata.inject(bean, null, null);
}
catch (BeanCreationException ex) {
throw ex;
}
catch (Throwable ex) {
throw new BeanCreationException(
"Injection of autowired dependencies failed for class [" + clazz + "]", ex);
}
}
|
<em>Native</em> processing method for direct calls with an arbitrary target
instance, resolving all of its fields and methods which are annotated with
one of the configured 'autowired' annotation types.
@param bean the target instance to process
@throws BeanCreationException if autowiring failed
@see #setAutowiredAnnotationTypes(Set)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessor.java
| 511
|
[
"bean"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_list_of_dict_to_arrays
|
def _list_of_dict_to_arrays(
data: list[dict],
columns: Index | None,
) -> tuple[np.ndarray, Index]:
"""
Convert list of dicts to numpy arrays
if `columns` is not passed, column names are inferred from the records
- for OrderedDict and dicts, the column names match
the key insertion-order from the first record to the last.
- For other kinds of dict-likes, the keys are lexically sorted.
Parameters
----------
data : iterable
collection of records (OrderedDict, dict)
columns: iterables or None
Returns
-------
content : np.ndarray[object, ndim=2]
columns : Index
"""
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, dict) for d in data)
pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)
columns = ensure_index(pre_cols)
# assure that they are of the base dict class and not of derived
# classes
data = [d if type(d) is dict else dict(d) for d in data]
content = lib.dicts_to_array(data, list(columns))
return content, columns
|
Convert list of dicts to numpy arrays
if `columns` is not passed, column names are inferred from the records
- for OrderedDict and dicts, the column names match
the key insertion-order from the first record to the last.
- For other kinds of dict-likes, the keys are lexically sorted.
Parameters
----------
data : iterable
collection of records (OrderedDict, dict)
columns: iterables or None
Returns
-------
content : np.ndarray[object, ndim=2]
columns : Index
|
python
|
pandas/core/internals/construction.py
| 835
|
[
"data",
"columns"
] |
tuple[np.ndarray, Index]
| true
| 3
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
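The column-inference rule in the docstring (insertion order when plain dicts are present, lexical sort otherwise) can be shown without pandas internals; this is a behavioral sketch, not the `lib.fast_unique_multiple_list_gen` implementation:

```python
from collections import UserDict

def infer_columns(records: list) -> list:
    # Sort only when no record is a plain dict, mirroring the `sort` flag above.
    sort = not any(isinstance(r, dict) for r in records)
    seen: dict = {}  # dict used as an insertion-ordered set
    for record in records:
        for key in record:
            seen.setdefault(key, None)
    return sorted(seen) if sort else list(seen)

print(infer_columns([{"b": 1, "a": 2}, {"c": 3}]))              # ['b', 'a', 'c']
print(infer_columns([UserDict({"b": 1}), UserDict({"a": 2})]))  # ['a', 'b']
```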
shouldAddOverrideKeyword
|
function shouldAddOverrideKeyword(): boolean {
return !!(context.program.getCompilerOptions().noImplicitOverride && declaration && hasAbstractModifier(declaration));
}
|
(#49811)
Note that there are cases in which the symbol declaration is not present. For example, in the code below both
`MappedIndirect.ax` and `MappedIndirect.ay` have no declaration node attached (due to their mapped-type
parent):
```ts
type Base = { ax: number; ay: string };
type BaseKeys = keyof Base;
type MappedIndirect = { [K in BaseKeys]: boolean };
```
In such cases, we assume the declaration to be a `PropertySignature`.
|
typescript
|
src/services/codefixes/helpers.ts
| 345
|
[] | true
| 3
| 8.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
rename_axis
|
def rename_axis(
self,
mapper: IndexLabel | lib.NoDefault = lib.no_default,
*,
index=lib.no_default,
axis: Axis = 0,
copy: bool | lib.NoDefault = lib.no_default,
inplace: bool = False,
) -> Self | None:
"""
Set the name of the axis for the index.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``.
index : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
axis : {0 or 'index'}, default 0
The axis to rename. For `Series` this parameter is unused and defaults to 0.
copy : bool, default False
This keyword is now ignored; changing its value will have no
impact on the method.
.. deprecated:: 3.0.0
This keyword is ignored and will be removed in pandas 4.0. Since
pandas 3.0, this method always returns a new object using a lazy
copy mechanism that defers copies until necessary
(Copy-on-Write). See the `user guide on Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
for more details.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Examples
--------
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
"""
return super().rename_axis(
mapper=mapper,
index=index,
axis=axis,
inplace=inplace,
copy=copy,
)
|
Set the name of the axis for the index.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``.
index : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
axis : {0 or 'index'}, default 0
The axis to rename. For `Series` this parameter is unused and defaults to 0.
copy : bool, default False
This keyword is now ignored; changing its value will have no
impact on the method.
.. deprecated:: 3.0.0
This keyword is ignored and will be removed in pandas 4.0. Since
pandas 3.0, this method always returns a new object using a lazy
copy mechanism that defers copies until necessary
(Copy-on-Write). See the `user guide on Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
for more details.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Examples
--------
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
|
python
|
pandas/core/series.py
| 5,451
|
[
"self",
"mapper",
"index",
"axis",
"copy",
"inplace"
] |
Self | None
| true
| 1
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
toByte
|
public static byte toByte(final String str) {
return toByte(str, (byte) 0);
}
|
Converts a {@link String} to a {@code byte}, returning {@code zero} if the conversion fails.
<p>
If the string is {@code null}, {@code zero} is returned.
</p>
<pre>
NumberUtils.toByte(null) = 0
NumberUtils.toByte("") = 0
NumberUtils.toByte("1") = 1
</pre>
@param str the string to convert, may be null.
@return the byte represented by the string, or {@code zero} if conversion fails.
@since 2.5
|
java
|
src/main/java/org/apache/commons/lang3/math/NumberUtils.java
| 1,358
|
[
"str"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
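The zero-on-failure parse in `toByte` is a common pattern; a rough Python analogue (range clamping to byte width aside):

```python
def to_byte(s, default=0):
    # Mirrors NumberUtils.toByte: any parse failure (or None) yields the default.
    try:
        return int(s)
    except (TypeError, ValueError):
        return default

print(to_byte(None), to_byte(""), to_byte("1"))  # 0 0 1
```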
_log_file_processing_stats
|
def _log_file_processing_stats(self, known_files: dict[str, set[DagFileInfo]]):
"""
Print out stats about how files are getting processed.
:param known_files: a list of file paths that may contain Airflow
DAG definitions
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
# Last # of DB Queries: The number of queries performed to the
# Airflow database during last parsing of the file.
headers = [
"Bundle",
"File Path",
"PID",
"Current Duration",
"# DAGs",
"# Errors",
"Last Duration",
"Last Run At",
]
rows = []
utcnow = timezone.utcnow()
now = time.monotonic()
for files in known_files.values():
for file in files:
stat = self._file_stats[file]
proc = self._processors.get(file)
num_dags = stat.num_dags
num_errors = stat.import_errors
file_name = Path(file.rel_path).stem
processor_pid = proc.pid if proc else None
processor_start_time = proc.start_time if proc else None
runtime = (now - processor_start_time) if processor_start_time else None
last_run = stat.last_finish_time
if last_run:
seconds_ago = (utcnow - last_run).total_seconds()
Stats.gauge(f"dag_processing.last_run.seconds_ago.{file_name}", seconds_ago)
rows.append(
(
file.bundle_name,
file.rel_path,
processor_pid,
runtime,
num_dags,
num_errors,
stat.last_duration,
last_run,
)
)
# Sort by longest last runtime. (Can't sort None values in python3)
rows.sort(key=lambda x: x[6] or 0.0, reverse=True)
formatted_rows = []
for (
bundle_name,
relative_path,
pid,
runtime,
num_dags,
num_errors,
last_runtime,
last_run,
) in rows:
formatted_rows.append(
(
bundle_name,
relative_path,
pid,
f"{runtime:.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
|
Print out stats about how files are getting processed.
:param known_files: a list of file paths that may contain Airflow
DAG definitions
:return: None
|
python
|
airflow-core/src/airflow/dag_processing/manager.py
| 687
|
[
"self",
"known_files"
] | true
| 12
| 8
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
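The stats table itself is plain `tabulate` output; a standalone sketch with synthetic rows (not real scheduler state) that mirrors the sort-by-duration step:

```python
from tabulate import tabulate  # third-party: pip install tabulate

headers = ["Bundle", "File Path", "PID", "Current Duration", "# DAGs", "# Errors"]
rows = [
    ("dags", "etl/daily.py", 4242, "1.25s", 3, 0),
    ("dags", "reports/weekly.py", None, None, 1, 1),
]

# Sort by duration descending, treating missing durations as 0 as the manager does.
rows.sort(key=lambda r: float(r[3].rstrip("s")) if r[3] else 0.0, reverse=True)
print("\n" + "=" * 80 + "\nDAG File Processing Stats\n\n"
      + tabulate(rows, headers=headers) + "\n" + "=" * 80)
```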
with_row_locks
|
def with_row_locks(
query: Select[Any],
session: Session,
*,
nowait: bool = False,
skip_locked: bool = False,
key_share: bool = True,
**kwargs,
) -> Select[Any]:
"""
Apply with_for_update to the SQLAlchemy query if row level locking is in use.
This wrapper is needed so we don't use the syntax on unsupported database
engines. In particular, MySQL (prior to 8.0) and MariaDB do not support
row locking, where we do not support nor recommend running HA scheduler. If
a user ignores this and tries anyway, everything will still work, just
slightly slower in some circumstances.
See https://jira.mariadb.org/browse/MDEV-13115
:param query: An SQLAlchemy Query object
:param session: ORM Session
:param nowait: If set to True, will pass NOWAIT to supported database backends.
:param skip_locked: If set to True, will pass SKIP LOCKED to supported database backends.
:param key_share: If true, will lock with FOR KEY SHARE UPDATE (at least on postgres).
:param kwargs: Extra kwargs to pass to with_for_update (of, nowait, skip_locked, etc)
:return: updated query
"""
try:
dialect_name = get_dialect_name(session)
except ValueError:
return query
if not dialect_name:
return query
# Don't use row level locks if the MySQL dialect (Mariadb & MySQL < 8) does not support it.
if not USE_ROW_LEVEL_LOCKING:
return query
if dialect_name == "mysql" and not getattr(
session.bind.dialect if session.bind else None, "supports_for_update_of", False
):
return query
if nowait:
kwargs["nowait"] = True
if skip_locked:
kwargs["skip_locked"] = True
if key_share:
kwargs["key_share"] = True
return query.with_for_update(**kwargs)
|
Apply with_for_update to the SQLAlchemy query if row level locking is in use.
This wrapper is needed so we don't use the syntax on unsupported database
engines. In particular, MySQL (prior to 8.0) and MariaDB do not support
row locking, where we do not support nor recommend running HA scheduler. If
a user ignores this and tries anyway, everything will still work, just
slightly slower in some circumstances.
See https://jira.mariadb.org/browse/MDEV-13115
:param query: An SQLAlchemy Query object
:param session: ORM Session
:param nowait: If set to True, will pass NOWAIT to supported database backends.
:param skip_locked: If set to True, will pass SKIP LOCKED to supported database backends.
:param key_share: If true, will lock with FOR KEY SHARE UPDATE (at least on postgres).
:param kwargs: Extra kwargs to pass to with_for_update (of, nowait, skip_locked, etc)
:return: updated query
|
python
|
airflow-core/src/airflow/utils/sqlalchemy.py
| 332
|
[
"query",
"session",
"nowait",
"skip_locked",
"key_share"
] |
Select[Any]
| true
| 9
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
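Outside Airflow, the wrapper reduces to SQLAlchemy's `with_for_update`; a sketch compiling against the PostgreSQL dialect (the table is made up). Note that in SQLAlchemy, `key_share=True` alone renders FOR NO KEY UPDATE, while combining it with `read=True` renders FOR KEY SHARE:

```python
from sqlalchemy import Column, Integer, MetaData, Table, select
from sqlalchemy.dialects import postgresql

metadata = MetaData()
task = Table("task_instance", metadata, Column("id", Integer, primary_key=True))

# key_share=True plus skip_locked=True, as the Airflow wrapper would pass them.
query = select(task).with_for_update(key_share=True, skip_locked=True)
print(query.compile(dialect=postgresql.dialect()))
# SELECT task_instance.id FROM task_instance FOR NO KEY UPDATE SKIP LOCKED
```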
getNextEvictable
|
@GuardedBy("this")
ReferenceEntry<K, V> getNextEvictable() {
for (ReferenceEntry<K, V> e : accessQueue) {
int weight = e.getValueReference().getWeight();
if (weight > 0) {
return e;
}
}
throw new AssertionError();
}
|
Performs eviction if the segment is over capacity. Avoids flushing the entire cache if the
newest entry exceeds the maximum weight all on its own.
@param newest the most recently added entry
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 2,583
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
return "ClientQuotaAlteration.Op(key=" + key + ", value=" + value + ")";
}
|
@return if set then the existing value is updated,
otherwise if null, the existing value is cleared
|
java
|
clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaAlteration.java
| 70
|
[] |
String
| true
| 1
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
_hash_pandas_object
|
def _hash_pandas_object(
self, *, encoding: str, hash_key: str, categorize: bool
) -> npt.NDArray[np.uint64]:
"""
Hook for hash_pandas_object.
Default is to use the values returned by _values_for_factorize.
Parameters
----------
encoding : str
Encoding for data & key when strings.
hash_key : str
Hash_key for string key to encode.
categorize : bool
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
Returns
-------
np.ndarray[uint64]
An array of hashed values.
See Also
--------
api.extensions.ExtensionArray._values_for_factorize : Return an array and
missing value suitable for factorization.
util.hash_array : Given a 1d array, return an array of hashed values.
Examples
--------
>>> pd.array([1, 2])._hash_pandas_object(
... encoding="utf-8", hash_key="1000000000000000", categorize=False
... )
array([ 6238072747940578789, 15839785061582574730], dtype=uint64)
"""
from pandas.core.util.hashing import hash_array
values, _ = self._values_for_factorize()
return hash_array(
values, encoding=encoding, hash_key=hash_key, categorize=categorize
)
|
Hook for hash_pandas_object.
Default is to use the values returned by _values_for_factorize.
Parameters
----------
encoding : str
Encoding for data & key when strings.
hash_key : str
Hash_key for string key to encode.
categorize : bool
Whether to first categorize object arrays before hashing. This is more
efficient when the array contains duplicate values.
Returns
-------
np.ndarray[uint64]
An array of hashed values.
See Also
--------
api.extensions.ExtensionArray._values_for_factorize : Return an array and
missing value suitable for factorization.
util.hash_array : Given a 1d array, return an array of hashed values.
Examples
--------
>>> pd.array([1, 2])._hash_pandas_object(
... encoding="utf-8", hash_key="1000000000000000", categorize=False
... )
array([ 6238072747940578789, 15839785061582574730], dtype=uint64)
|
python
|
pandas/core/arrays/base.py
| 2,335
|
[
"self",
"encoding",
"hash_key",
"categorize"
] |
npt.NDArray[np.uint64]
| true
| 1
| 6.64
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_capture_with_reraise
|
def _capture_with_reraise() -> Generator[list[warnings.WarningMessage], None, None]:
"""Capture warnings in context and re-raise it on exit from the context manager."""
captured_warnings = []
try:
with warnings.catch_warnings(record=True) as captured_warnings:
yield captured_warnings
finally:
if captured_warnings:
for cw in captured_warnings:
warnings.warn_explicit(
message=cw.message,
category=cw.category,
filename=cw.filename,
lineno=cw.lineno,
source=cw.source,
)
|
Capture warnings in context and re-raise it on exit from the context manager.
|
python
|
airflow-core/src/airflow/dag_processing/dagbag.py
| 72
|
[] |
Generator[list[warnings.WarningMessage], None, None]
| true
| 3
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
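A self-contained version of the same capture-and-re-raise shape (the Airflow helper is presumably decorated with `@contextmanager` upstream; the decorator sits outside the extracted snippet):

```python
import warnings
from contextlib import contextmanager

@contextmanager
def capture_with_reraise():
    # Record warnings raised inside the block, then re-emit them on exit.
    with warnings.catch_warnings(record=True) as captured:
        yield captured
    for cw in captured:
        warnings.warn_explicit(
            message=cw.message, category=cw.category,
            filename=cw.filename, lineno=cw.lineno, source=cw.source,
        )

with warnings.catch_warnings(record=True) as outer:
    warnings.simplefilter("always")
    with capture_with_reraise() as inner:
        warnings.warn("deferred", UserWarning)
print(len(outer))  # 1: the warning was re-emitted after the inner block exited
```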
checkDependentWaitingThreads
|
private boolean checkDependentWaitingThreads(Thread waitingThread, Thread candidateThread) {
Thread threadToCheck = waitingThread;
while ((threadToCheck = this.lenientWaitingThreads.get(threadToCheck)) != null) {
if (threadToCheck == candidateThread) {
return true;
}
}
return false;
}
|
Return the (raw) singleton object registered under the given name,
creating and registering a new one if none registered yet.
@param beanName the name of the bean
@param singletonFactory the ObjectFactory to lazily create the singleton
with, if necessary
@return the registered singleton object
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultSingletonBeanRegistry.java
| 434
|
[
"waitingThread",
"candidateThread"
] | true
| 3
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
put
|
public JSONObject put(String name, Object value) throws JSONException {
if (value == null) {
this.nameValuePairs.remove(name);
return this;
}
if (value instanceof Number) {
// deviate from the original by checking all Numbers, not just floats &
// doubles
JSON.checkDouble(((Number) value).doubleValue());
}
this.nameValuePairs.put(checkName(name), value);
return this;
}
|
Maps {@code name} to {@code value}, clobbering any existing name/value mapping with
the same name. If the value is {@code null}, any existing mapping for {@code name}
is removed.
@param name the name of the property
@param value a {@link JSONObject}, {@link JSONArray}, String, Boolean, Integer,
Long, Double, {@link #NULL}, or {@code null}. May not be {@link Double#isNaN()
NaNs} or {@link Double#isInfinite() infinities}.
@return this object.
@throws JSONException if an error occurs
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 261
|
[
"name",
"value"
] |
JSONObject
| true
| 3
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
append
|
public StrBuilder append(final char[] chars) {
if (chars == null) {
return appendNull();
}
final int strLen = chars.length;
if (strLen > 0) {
final int len = length();
ensureCapacity(len + strLen);
System.arraycopy(chars, 0, buffer, len, strLen);
size += strLen;
}
return this;
}
|
Appends a char array to the string builder.
Appending null will call {@link #appendNull()}.
@param chars the char array to append
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 367
|
[
"chars"
] |
StrBuilder
| true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
initializeUnchecked
|
public static <T> T initializeUnchecked(final ConcurrentInitializer<T> initializer) {
try {
return initialize(initializer);
} catch (final ConcurrentException cex) {
throw new ConcurrentRuntimeException(cex.getCause());
}
}
|
Invokes the specified {@link ConcurrentInitializer} and transforms
occurring exceptions to runtime exceptions. This method works like
{@link #initialize(ConcurrentInitializer)}, but if the {@code
ConcurrentInitializer} throws a {@link ConcurrentException}, it is
caught, and the cause is wrapped in a {@link ConcurrentRuntimeException}.
So client code does not have to deal with checked exceptions.
@param <T> the type of the object produced by the initializer
@param initializer the {@link ConcurrentInitializer} to be invoked
@return the object managed by the {@link ConcurrentInitializer}
@throws ConcurrentRuntimeException if the initializer throws an exception
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/ConcurrentUtils.java
| 305
|
[
"initializer"
] |
T
| true
| 2
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
sniff
|
final void sniff() throws IOException {
List<Node> sniffedNodes = nodesSniffer.sniff();
if (logger.isDebugEnabled()) {
logger.debug("sniffed nodes: " + sniffedNodes);
}
if (sniffedNodes.isEmpty()) {
logger.warn("no nodes to set, nodes will be updated at the next sniffing round");
} else {
restClient.setNodes(sniffedNodes);
}
}
|
Schedule sniffing to run as soon as possible if it isn't already running. Once such sniffing round runs
it will also schedule a new round after sniffAfterFailureDelay ms.
|
java
|
client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
| 208
|
[] |
void
| true
| 3
| 6.56
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
private_function_across_module
|
def private_function_across_module(file_obj: IO[str]) -> Iterable[tuple[int, str]]:
"""
Checking that a private function is not used across modules.
Parameters
----------
file_obj : IO
File-like object containing the Python code to validate.
Yields
------
line_number : int
Line number of the private function that is used across modules.
msg : str
Explanation of the error.
"""
contents = file_obj.read()
tree = ast.parse(contents)
imported_modules: set[str] = set()
for node in ast.walk(tree):
if isinstance(node, (ast.Import, ast.ImportFrom)):
for module in node.names:
module_fqdn = module.name if module.asname is None else module.asname
imported_modules.add(module_fqdn)
if not isinstance(node, ast.Call):
continue
try:
module_name = node.func.value.id
function_name = node.func.attr
except AttributeError:
continue
# Exception section #
# (Debatable) Class case
if module_name[0].isupper():
continue
# (Debatable) Dunder methods case
elif function_name.startswith("__") and function_name.endswith("__"):
continue
elif module_name + "." + function_name in PRIVATE_FUNCTIONS_ALLOWED:
continue
if module_name in imported_modules and function_name.startswith("_"):
yield (node.lineno, f"Private function '{module_name}.{function_name}'")
|
Checking that a private function is not used across modules.
Parameters
----------
file_obj : IO
File-like object containing the Python code to validate.
Yields
------
line_number : int
Line number of the private function that is used across modules.
msg : str
Explanation of the error.
|
python
|
scripts/validate_unwanted_patterns.py
| 101
|
[
"file_obj"
] |
Iterable[tuple[int, str]]
| true
| 12
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
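The checker runs on any file-like object, so it can be exercised in memory; below is a trimmed re-run of the same AST walk (the allow-list and the class/dunder exceptions are omitted for brevity):

```python
import ast
import io

code = "import pandas as pd\npd._private_helper()\npd.read_csv('x.csv')\n"

def find_private_calls(file_obj):
    tree = ast.parse(file_obj.read())
    imported = {a.asname or a.name for n in ast.walk(tree)
                if isinstance(n, (ast.Import, ast.ImportFrom)) for a in n.names}
    for node in ast.walk(tree):
        if (isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute)
                and isinstance(node.func.value, ast.Name)):
            module, func = node.func.value.id, node.func.attr
            if module in imported and func.startswith("_"):
                yield node.lineno, f"Private function '{module}.{func}'"

print(list(find_private_calls(io.StringIO(code))))
# [(2, "Private function 'pd._private_helper'")]
```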
read
|
def read(self, path: str) -> PrecompileCacheEntry:
"""
Abstract method to read dynamo cache entry and backends from storage.
Args:
path: Path or key to identify where to read the data from
Returns:
A tuple containing (dynamo_cache_entry, backend_content)
"""
...
|
Abstract method to read dynamo cache entry and backends from storage.
Args:
path: Path or key to identify where to read the data from
Returns:
A tuple containing (dynamo_cache_entry, backend_content)
|
python
|
torch/_dynamo/package.py
| 967
|
[
"self",
"path"
] |
PrecompileCacheEntry
| true
| 1
| 6.56
|
pytorch/pytorch
| 96,034
|
google
| false
|
logStartupInfo
|
protected void logStartupInfo(ConfigurableApplicationContext context) {
boolean isRoot = context.getParent() == null;
if (isRoot) {
new StartupInfoLogger(this.mainApplicationClass, context.getEnvironment()).logStarting(getApplicationLog());
}
}
|
Called to log startup information, subclasses may override to add additional
logging.
@param context the application context
@since 3.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 633
|
[
"context"
] |
void
| true
| 2
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
setdiff1d
|
def setdiff1d(
x1: Array | complex,
x2: Array | complex,
/,
*,
assume_unique: bool = False,
xp: ModuleType | None = None,
) -> Array:
"""
Find the set difference of two arrays.
Return the unique values in `x1` that are not in `x2`.
Parameters
----------
x1 : array | int | float | complex | bool
Input array.
x2 : array
Input comparison array.
assume_unique : bool
If ``True``, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is ``False``.
xp : array_namespace, optional
The standard-compatible namespace for `x1` and `x2`. Default: infer.
Returns
-------
array
1D array of values in `x1` that are not in `x2`. The result
is sorted when `assume_unique` is ``False``, but otherwise only sorted
if the input is sorted.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x1 = xp.asarray([1, 2, 3, 2, 4, 1])
>>> x2 = xp.asarray([3, 4, 5, 6])
>>> xpx.setdiff1d(x1, x2, xp=xp)
Array([1, 2], dtype=array_api_strict.int64)
"""
if xp is None:
xp = array_namespace(x1, x2)
# https://github.com/microsoft/pyright/issues/10103
x1_, x2_ = asarrays(x1, x2, xp=xp)
if assume_unique:
x1_ = xp.reshape(x1_, (-1,))
x2_ = xp.reshape(x2_, (-1,))
else:
x1_ = xp.unique_values(x1_)
x2_ = xp.unique_values(x2_)
return x1_[_helpers.in1d(x1_, x2_, assume_unique=True, invert=True, xp=xp)]
|
Find the set difference of two arrays.
Return the unique values in `x1` that are not in `x2`.
Parameters
----------
x1 : array | int | float | complex | bool
Input array.
x2 : array
Input comparison array.
assume_unique : bool
If ``True``, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is ``False``.
xp : array_namespace, optional
The standard-compatible namespace for `x1` and `x2`. Default: infer.
Returns
-------
array
1D array of values in `x1` that are not in `x2`. The result
is sorted when `assume_unique` is ``False``, but otherwise only sorted
if the input is sorted.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x1 = xp.asarray([1, 2, 3, 2, 4, 1])
>>> x2 = xp.asarray([3, 4, 5, 6])
>>> xpx.setdiff1d(x1, x2, xp=xp)
Array([1, 2], dtype=array_api_strict.int64)
|
python
|
sklearn/externals/array_api_extra/_lib/_funcs.py
| 888
|
[
"x1",
"x2",
"assume_unique",
"xp"
] |
Array
| true
| 4
| 8.64
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
matrix
|
function matrix<I extends L.List>(lists: L.List<I>): Strict<I[number]>[][] {
// we cannot produce a matrix with empty lists, filter them out
const nonEmptyLists = select(lists, (list) => list.length > 0)
const nonFlatMatrix = _matrix(nonEmptyLists) // first raw matrix
const flattenMatrix = repeat(flatten, times(nonEmptyLists.length - 1))
// we flatten the matrix as many times as it had recursion levels
return flattenMatrix(nonFlatMatrix) // final flat matrix
}
|
Creates the cross-product of a list of lists.
@param lists
@returns
|
typescript
|
helpers/blaze/matrix.ts
| 29
|
[
"lists"
] | true
| 1
| 6.88
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
configurationAsText
|
std::string configurationAsText(const ClangTidyOptions &Options) {
std::string Text;
llvm::raw_string_ostream Stream(Text);
llvm::yaml::Output Output(Stream);
// We use the same mapping method for input and output, so we need a non-const
// reference here.
ClangTidyOptions NonConstValue = Options;
Output << NonConstValue;
return Stream.str();
}
|
Parses -line-filter option and stores it to the \c Options.
|
cpp
|
clang-tools-extra/clang-tidy/ClangTidyOptions.cpp
| 572
|
[] | true
| 1
| 6
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
devices
|
def devices(self) -> list[_Device]:
"""
The devices supported by Dask.
For Dask, this always returns ``['cpu', DASK_DEVICE]``.
Returns
-------
devices : list[Device]
The devices supported by Dask.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.devices()
['cpu', DASK_DEVICE]
"""
return ["cpu", _DASK_DEVICE]
|
The devices supported by Dask.
For Dask, this always returns ``['cpu', DASK_DEVICE]``.
Returns
-------
devices : list[Device]
The devices supported by Dask.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.devices()
['cpu', DASK_DEVICE]
|
python
|
sklearn/externals/array_api_compat/dask/array/_info.py
| 391
|
[
"self"
] |
list[_Device]
| true
| 1
| 6.48
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
equals
|
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj instanceof ApplicationPid other) {
return ObjectUtils.nullSafeEquals(this.pid, other.pid);
}
return false;
}
|
Return the application PID as a {@link Long}.
@return the application PID or {@code null}
@since 3.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationPid.java
| 80
|
[
"obj"
] | true
| 3
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
equals
|
@Override
public boolean equals(Object obj) {
return obj instanceof SubscriptionPattern &&
Objects.equals(pattern, ((SubscriptionPattern) obj).pattern);
}
|
@return Regular expression pattern compatible with RE2/J.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/SubscriptionPattern.java
| 54
|
[
"obj"
] | true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
flushPendingSend
|
private void flushPendingSend() {
flushPendingBuffer();
if (!buffers.isEmpty()) {
ByteBuffer[] byteBufferArray = buffers.toArray(new ByteBuffer[0]);
addSend(new ByteBufferSend(byteBufferArray, sizeOfBuffers));
clearBuffers();
}
}
|
Write a record set. The underlying record data will be retained
in the result of {@link #build()}. See {@link BaseRecords#toSend()}.
@param records the records to write
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/SendBuilder.java
| 150
|
[] |
void
| true
| 2
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
compare
|
@Override
public int compare(final Object obj1, final Object obj2) {
return ((Comparable) obj1).compareTo(obj2);
}
|
Comparable based compare implementation.
@param obj1 left-hand side side of comparison.
@param obj2 right-hand side side of comparison.
@return negative, 0, positive comparison value.
|
java
|
src/main/java/org/apache/commons/lang3/Range.java
| 48
|
[
"obj1",
"obj2"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
checkState
|
public static void checkState(boolean expression) {
if (!expression) {
throw new IllegalStateException();
}
}
|
Ensures the truth of an expression involving the state of the calling instance, but not
involving any parameters to the calling method.
@param expression a boolean expression
@throws IllegalStateException if {@code expression} is false
@see Verify#verify Verify.verify()
|
java
|
android/guava/src/com/google/common/base/Preconditions.java
| 495
|
[
"expression"
] |
void
| true
| 2
| 6.08
|
google/guava
| 51,352
|
javadoc
| false
|
unicodeEscaped
|
public static String unicodeEscaped(final char ch) {
return "\\u" +
HEX_DIGITS[ch >> 12 & 15] +
HEX_DIGITS[ch >> 8 & 15] +
HEX_DIGITS[ch >> 4 & 15] +
HEX_DIGITS[ch & 15];
}
|
Converts the string to the Unicode format '\u0020'.
<p>This format is the Java source code format.</p>
<pre>
CharUtils.unicodeEscaped(' ') = "\u0020"
CharUtils.unicodeEscaped('A') = "\u0041"
</pre>
@param ch the character to convert
@return the escaped Unicode string
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 510
|
[
"ch"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
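For comparison, the same four-hex-digit escape is a one-liner in Python:

```python
def unicode_escaped(ch: str) -> str:
    # Four hex digits, zero-padded, matching the Java HEX_DIGITS nibble walk.
    return f"\\u{ord(ch):04x}"

print(unicode_escaped(" "))  # \u0020
print(unicode_escaped("A"))  # \u0041
```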
select_one_layer_lstm_function
|
def select_one_layer_lstm_function(input, hx, params):
r"""Check whether we could use decompose lstm with mkldnn_rnn_layer.
All the below conditions need to be met:
* ``torch._C._get_mkldnn_enabled()`` returns ``True``.
* All the input args are on CPU.
* The dtypes of args are either torch.float or torch.bfloat16.
* Inference.
* ``has_projections`` returns ``False``.
Args:
* input: the input sequence to LSTM
* hx: a tuple of the input hidden state and cell state ``(h_0, c_0)`` to LSTM
* params: the weight and bias tensors of LSTM
"""
def use_mkldnn(input, hx, params):
if not torch._C._get_mkldnn_enabled():
return False
tensors = [input] + list(hx) + list(chain.from_iterable(params))
devices = {t.device for t in tensors}
if len(devices) != 1:
return False
device = devices.pop()
if device != torch.device("cpu"):
return False
# With autocast, possible to have mixed dtype here
dtypes = {t.dtype for t in tensors}
for dtype in dtypes:
if dtype not in [torch.float, torch.bfloat16]:
return False
if input.requires_grad:
return False
has_projections = hx[0].size(2) != hx[1].size(2)
if has_projections:
return False
return True
# mkldnn_one_layer_lstm does not depend on seq_len while one_layer_lstm
# will expand over the seq_len dim
if use_mkldnn(input, hx, params):
return mkldnn_one_layer_lstm
else:
return one_layer_lstm
|
r"""Check whether we could use decompose lstm with mkldnn_rnn_layer.
All the below conditions need to be met:
* ``torch._C._get_mkldnn_enabled()`` returns ``True``.
* All the input args are on CPU.
* The dtypes of args are either torch.float or torch.bfloat16.
* Inference.
* ``has_projections`` returns ``False``.
Args:
* input: the input sequence to LSTM
* hx: a tuple of the input hidden state and cell state ``(h_0, c_0)`` to LSTM
* params: the weight and bias tensors of LSTM
|
python
|
torch/_decomp/decompositions.py
| 3,641
|
[
"input",
"hx",
"params"
] | false
| 10
| 6.08
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
min
|
public static char min(char... array) {
checkArgument(array.length > 0);
char min = array[0];
for (int i = 1; i < array.length; i++) {
if (array[i] < min) {
min = array[i];
}
}
return min;
}
|
Returns the least value present in {@code array}.
@param array a <i>nonempty</i> array of {@code char} values
@return the value present in {@code array} that is less than or equal to every other value in
the array
@throws IllegalArgumentException if {@code array} is empty
|
java
|
android/guava/src/com/google/common/primitives/Chars.java
| 223
|
[] | true
| 3
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
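A Python analogue of the nonempty-minimum contract, leaning on the built-in `min`:

```python
def min_char(values: str) -> str:
    # Guard mirrors checkArgument(array.length > 0) before scanning for the least value.
    if not values:
        raise ValueError("array is empty")
    return min(values)

print(min_char("guava"))  # 'a'
```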
format
|
public static String format(final Calendar calendar, final String pattern, final Locale locale) {
return format(calendar, pattern, getTimeZone(calendar), locale);
}
|
Formats a calendar into a specific pattern in a locale. The TimeZone from the calendar
will be used for formatting.
@param calendar the calendar to format, not null.
@param pattern the pattern to use to format the calendar, not null.
@param locale the locale to use, may be {@code null}.
@return the formatted calendar.
@see FastDateFormat#format(Calendar)
@since 2.4
|
java
|
src/main/java/org/apache/commons/lang3/time/DateFormatUtils.java
| 225
|
[
"calendar",
"pattern",
"locale"
] |
String
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
concurrencyLimit
|
public SimpleAsyncTaskSchedulerBuilder concurrencyLimit(@Nullable Integer concurrencyLimit) {
return new SimpleAsyncTaskSchedulerBuilder(this.threadNamePrefix, concurrencyLimit, this.virtualThreads,
this.taskTerminationTimeout, this.taskDecorator, this.customizers);
}
|
Set the concurrency limit.
@param concurrencyLimit the concurrency limit
@return a new builder instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/task/SimpleAsyncTaskSchedulerBuilder.java
| 90
|
[
"concurrencyLimit"
] |
SimpleAsyncTaskSchedulerBuilder
| true
| 1
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
computeNext
|
protected abstract @Nullable T computeNext();
|
Returns the next element. <b>Note:</b> the implementation must call {@link #endOfData()} when
there are no elements left in the iteration. Failure to do so could result in an infinite loop.
<p>The initial invocation of {@link #hasNext()} or {@link #next()} calls this method, as does
the first invocation of {@code hasNext} or {@code next} following each successful call to
{@code next}. Once the implementation either invokes {@code endOfData} or throws an exception,
{@code computeNext} is guaranteed to never be called again.
<p>If this method throws an exception, it will propagate outward to the {@code hasNext} or
{@code next} invocation that invoked this method. Any further attempts to use the iterator will
result in an {@link IllegalStateException}.
<p>The implementation of this method may not invoke the {@code hasNext}, {@code next}, or
{@link #peek()} methods on this instance; if it does, an {@code IllegalStateException} will
result.
@return the next element if there was one. If {@code endOfData} was called during execution,
the return value will be ignored.
@throws RuntimeException if any unrecoverable error happens. This exception will propagate
outward to the {@code hasNext()}, {@code next()}, or {@code peek()} invocation that invoked
this method. Any further attempts to use the iterator will result in an {@link
IllegalStateException}.
|
java
|
android/guava/src/com/google/common/collect/AbstractIterator.java
| 111
|
[] |
T
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
count
|
def count(self, axis: Axis = 0, numeric_only: bool = False) -> Series:
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, ``pandas.NA`` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series
For each column/row the number of non-NA/null entries.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame(
... {
... "Person": ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24.0, np.nan, 21.0, 33, 26],
... "Single": [False, True, True, True, False],
... }
... )
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis="columns")
0 3
1 2
2 3
3 3
4 3
dtype: int64
"""
axis = self._get_axis_number(axis)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))
else:
result = notna(frame).sum(axis=axis)
return result.astype("int64").__finalize__(self, method="count")
|
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, ``pandas.NA`` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series
For each column/row the number of non-NA/null entries.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame(
... {
... "Person": ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24.0, np.nan, 21.0, 33, 26],
... "Single": [False, True, True, True, False],
... }
... )
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis="columns")
0 3
1 2
2 3
3 3
4 3
dtype: int64
|
python
|
pandas/core/frame.py
| 12,703
|
[
"self",
"axis",
"numeric_only"
] |
Series
| true
| 5
| 8.56
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
process
|
private void process(final PausePartitionsEvent event) {
try {
Collection<TopicPartition> partitions = event.partitions();
log.debug("Pausing partitions {}", partitions);
for (TopicPartition partition : partitions) {
subscriptions.pause(partition);
}
event.future().complete(null);
} catch (Exception e) {
event.future().completeExceptionally(e);
}
}
|
Processes a PausePartitionsEvent by pausing fetching for each of the event's partitions, then completes the event's future (exceptionally if pausing fails).
@param event Event containing the partitions to pause.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
| 621
|
[
"event"
] |
void
| true
| 2
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
setAccessibleWorkaround
|
static <T extends AccessibleObject> T setAccessibleWorkaround(final T obj) {
if (AccessibleObjects.isAccessible(obj)) {
return obj;
}
final Member m = (Member) obj;
if (isPublic(m) && isPackage(m.getDeclaringClass().getModifiers())) {
try {
obj.setAccessible(true);
return obj;
} catch (final SecurityException ignored) {
// Ignore in favor of subsequent IllegalAccessException
}
}
return obj;
}
|
Default access superclass workaround.
<p>
When a {@code public} class has a default access superclass with {@code public} members,
these members are accessible. Calling them from compiled code works fine.
Unfortunately, on some JVMs, using reflection to invoke these members
seems to (wrongly) prevent access even when the modifier is {@code public}.
Calling {@code setAccessible(true)} solves the problem but will only work from
sufficiently privileged code. Better workarounds would be gratefully
accepted.
</p>
@param obj the AccessibleObject to set as accessible, may be null.
@return the given object, possibly made accessible.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MemberUtils.java
| 322
|
[
"obj"
] |
T
| true
| 5
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
faceIjkToCellBoundaryClassIII
|
private CellBoundary faceIjkToCellBoundaryClassIII(int adjRes) {
final LatLng[] points = new LatLng[CellBoundary.MAX_CELL_BNDRY_VERTS];
int numPoints = 0;
final FaceIJK fijk = new FaceIJK(this.face, new CoordIJK(0, 0, 0));
final CoordIJK scratch = new CoordIJK(0, 0, 0);
int lastFace = -1;
Overage lastOverage = Overage.NO_OVERAGE;
for (int vert = 0; vert < Constants.NUM_HEX_VERTS + 1; vert++) {
final int v = vert % Constants.NUM_HEX_VERTS;
fijk.coord.reset(
VERTEX_CLASSIII[v][0] + this.coord.i,
VERTEX_CLASSIII[v][1] + this.coord.j,
VERTEX_CLASSIII[v][2] + this.coord.k
);
fijk.coord.ijkNormalize();
fijk.face = this.face;
final Overage overage = fijk.adjustOverageClassII(adjRes, false, true);
/*
Check for edge-crossing. Each face of the underlying icosahedron is a
different projection plane. So if an edge of the hexagon crosses an
icosahedron edge, an additional vertex must be introduced at that
intersection point. Then each half of the cell edge can be projected
to geographic coordinates using the appropriate icosahedron face
projection. Note that Class II cell edges have vertices on the face
edge, with no edge line intersections.
*/
if (vert > 0 && fijk.face != lastFace && lastOverage != Overage.FACE_EDGE) {
// find hex2d of the two vertexes on original face
final int lastV = (v + 5) % Constants.NUM_HEX_VERTS;
// The center point is now in the same substrate grid as the origin
// cell vertices. Add the center point substate coordinates
// to each vertex to translate the vertices to that cell.
final Vec2d orig2d0 = orig(scratch, VERTEX_CLASSIII[lastV]);
final Vec2d orig2d1 = orig(scratch, VERTEX_CLASSIII[v]);
// find the appropriate icosa face edge vertexes
final int face2 = ((lastFace == this.face) ? fijk.face : lastFace);
// find the intersection and add the lat/lng point to the result
final Vec2d inter = findIntersectionPoint(orig2d0, orig2d1, adjRes, adjacentFaceDir[this.face][face2]);
if (inter != null) {
points[numPoints++] = inter.hex2dToGeo(this.face, adjRes, true);
}
}
// convert vertex to lat/lng and add to the result
// vert == start + NUM_HEX_VERTS is only used to test for possible
// intersection on last edge
if (vert < Constants.NUM_HEX_VERTS) {
points[numPoints++] = fijk.coord.ijkToGeo(fijk.face, adjRes, true);
}
lastFace = fijk.face;
lastOverage = overage;
}
return new CellBoundary(points, numPoints);
}
|
Generates the cell boundary in spherical coordinates for a cell given by this
FaceIJK address at a specified resolution.
@param adjRes The adjusted H3 resolution of the cell.
@return the cell boundary in spherical coordinates.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java
| 579
|
[
"adjRes"
] |
CellBoundary
| true
| 8
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
matchesSubscribedPattern
|
public synchronized boolean matchesSubscribedPattern(String topic) {
Pattern pattern = this.subscribedPattern;
if (hasPatternSubscription() && pattern != null)
return pattern.matcher(topic).matches();
return false;
}
|
Check whether a topic matches a subscribed pattern.
@return true if pattern subscription is in use and the topic matches the subscribed pattern, false otherwise
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
| 362
|
[
"topic"
] | true
| 3
| 6.88
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
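The full-string matching semantics above come from java.util.regex; a standalone sketch (the pattern and topic names are hypothetical):

import java.util.regex.Pattern;

public class PatternSubscriptionDemo {
    public static void main(String[] args) {
        Pattern subscribedPattern = Pattern.compile("orders-.*");
        // matches() requires the entire topic name to match, as in matchesSubscribedPattern
        System.out.println(subscribedPattern.matcher("orders-eu").matches());    // true
        System.out.println(subscribedPattern.matcher("my-orders-eu").matches()); // false
    }
}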
binary_repr
|
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in
the designated form. If the `width` value is insufficient, an error is
raised.
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> import numpy as np
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
"""
def err_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
raise ValueError(
f"Insufficient bit {width=} provided for {binwidth=}"
)
# Ensure that num is a Python integer to avoid overflow or unwanted
# casts to floating point.
num = operator.index(num)
if num == 0:
return '0' * (width or 1)
elif num > 0:
binary = f'{num:b}'
binwidth = len(binary)
outwidth = (binwidth if width is None
else builtins.max(binwidth, width))
err_if_insufficient(width, binwidth)
return binary.zfill(outwidth)
elif width is None:
return f'-{-num:b}'
else:
poswidth = len(f'{-num:b}')
# See gh-8679: remove extra digit
# for numbers at boundaries.
if 2**(poswidth - 1) == -num:
poswidth -= 1
twocomp = 2**(poswidth + 1) + num
binary = f'{twocomp:b}'
binwidth = len(binary)
outwidth = builtins.max(binwidth, width)
err_if_insufficient(width, binwidth)
return '1' * (outwidth - binwidth) + binary
|
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in
the designated form. If the `width` value is insufficient, an error is
raised.
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> import numpy as np
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
|
python
|
numpy/_core/numeric.py
| 1,990
|
[
"num",
"width"
] | false
| 10
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
format
|
@Override
public StringBuffer format(final long millis, final StringBuffer buf) {
final Calendar c = newCalendar();
c.setTimeInMillis(millis);
return (StringBuffer) applyRules(c, (Appendable) buf);
}
|
Formats a millisecond value into the supplied buffer using the parsed pattern rules.
@param millis the millisecond value to format.
@param buf the buffer to format into.
@return the supplied buffer.
|
java
|
src/main/java/org/apache/commons/lang3/time/FastDatePrinter.java
| 1,202
|
[
"millis",
"buf"
] |
StringBuffer
| true
| 1
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
handleNotControllerError
|
private void handleNotControllerError(AbstractResponse response) throws ApiException {
// When sending requests directly to the follower controller, it might return NOT_LEADER_OR_FOLLOWER error.
if (response.errorCounts().containsKey(Errors.NOT_CONTROLLER)) {
handleNotControllerError(Errors.NOT_CONTROLLER);
} else if (metadataManager.usingBootstrapControllers() && response.errorCounts().containsKey(Errors.NOT_LEADER_OR_FOLLOWER)) {
handleNotControllerError(Errors.NOT_LEADER_OR_FOLLOWER);
}
}
|
Checks the response for errors indicating that the request reached a node that is no longer the active
controller (NOT_CONTROLLER, or NOT_LEADER_OR_FOLLOWER when bootstrap controllers are in use) and delegates
handling so the controller can be rediscovered and the request retried.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 4,140
|
[
"response"
] |
void
| true
| 4
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
stripStart
|
public static String stripStart(final String str, final String stripChars) {
final int strLen = length(str);
if (strLen == 0) {
return str;
}
int start = 0;
if (stripChars == null) {
while (start != strLen && Character.isWhitespace(str.charAt(start))) {
start++;
}
} else if (stripChars.isEmpty()) {
return str;
} else {
while (start != strLen && stripChars.indexOf(str.charAt(start)) != INDEX_NOT_FOUND) {
start++;
}
}
return str.substring(start);
}
|
Strips any of a set of characters from the start of a String.
<p>
A {@code null} input String returns {@code null}. An empty string ("") input returns the empty string.
</p>
<p>
If the stripChars String is {@code null}, whitespace is stripped as defined by {@link Character#isWhitespace(char)}.
</p>
<pre>
StringUtils.stripStart(null, *) = null
StringUtils.stripStart("", *) = ""
StringUtils.stripStart("abc", "") = "abc"
StringUtils.stripStart("abc", null) = "abc"
StringUtils.stripStart(" abc", null) = "abc"
StringUtils.stripStart("abc ", null) = "abc "
StringUtils.stripStart(" abc ", null) = "abc "
StringUtils.stripStart("yxabc ", "xyz") = "abc "
</pre>
@param str the String to remove characters from, may be null.
@param stripChars the characters to remove, null treated as whitespace.
@return the stripped String, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 7,998
|
[
"str",
"stripChars"
] |
String
| true
| 8
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
mask_zero_div_zero
|
def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray:
"""
Set results of 0 // 0 to np.nan, regardless of the dtypes
of the numerator or the denominator.
Parameters
----------
x : ndarray
y : ndarray
result : ndarray
Returns
-------
ndarray
The filled result.
Examples
--------
>>> x = np.array([1, 0, -1], dtype=np.int64)
>>> x
array([ 1, 0, -1])
>>> y = 0 # int 0; numpy behavior is different with float
>>> result = x // y
>>> result # raw numpy result does not fill division by zero
array([0, 0, 0])
>>> mask_zero_div_zero(x, y, result)
array([ inf, nan, -inf])
"""
if not hasattr(y, "dtype"):
# e.g. scalar, tuple
y = np.array(y)
if not hasattr(x, "dtype"):
# e.g scalar, tuple
x = np.array(x)
zmask = y == 0
if zmask.any():
# Flip sign if necessary for -0.0
zneg_mask = zmask & np.signbit(y)
zpos_mask = zmask & ~zneg_mask
x_lt0 = x < 0
x_gt0 = x > 0
nan_mask = zmask & (x == 0)
neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0)
posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0)
if nan_mask.any() or neginf_mask.any() or posinf_mask.any():
# Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN
result = result.astype("float64", copy=False)
result[nan_mask] = np.nan
result[posinf_mask] = np.inf
result[neginf_mask] = -np.inf
return result
|
Set results of 0 // 0 to np.nan, regardless of the dtypes
of the numerator or the denominator.
Parameters
----------
x : ndarray
y : ndarray
result : ndarray
Returns
-------
ndarray
The filled result.
Examples
--------
>>> x = np.array([1, 0, -1], dtype=np.int64)
>>> x
array([ 1, 0, -1])
>>> y = 0 # int 0; numpy behavior is different with float
>>> result = x // y
>>> result # raw numpy result does not fill division by zero
array([0, 0, 0])
>>> mask_zero_div_zero(x, y, result)
array([ inf, nan, -inf])
|
python
|
pandas/core/ops/missing.py
| 72
|
[
"x",
"y",
"result"
] |
np.ndarray
| true
| 7
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
size
|
public static int size(Iterator<?> iterator) {
long count = 0L;
while (iterator.hasNext()) {
iterator.next();
count++;
}
return Ints.saturatedCast(count);
}
|
Returns the number of elements remaining in {@code iterator}. The iterator will be left
exhausted: its {@code hasNext()} method will return {@code false}.
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 170
|
[
"iterator"
] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
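A short usage sketch of the exhaustion behavior documented above, assuming Guava is available:

import com.google.common.collect.Iterators;
import java.util.Iterator;
import java.util.List;

public class IteratorsSizeDemo {
    public static void main(String[] args) {
        Iterator<String> it = List.of("a", "b", "c").iterator();
        System.out.println(Iterators.size(it)); // 3
        System.out.println(it.hasNext());       // false: the iterator is left exhausted
    }
}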
ignoreLogConfig
|
private boolean ignoreLogConfig(@Nullable String logConfig) {
return !StringUtils.hasLength(logConfig) || logConfig.startsWith("-D");
}
|
Returns whether the given log configuration location should be ignored, i.e. when it is
empty or refers to a JVM system property ({@code -D...}) rather than a config file.
@param logConfig the log configuration location
@return {@code true} if the configuration should be ignored
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/logging/LoggingApplicationListener.java
| 356
|
[
"logConfig"
] | true
| 2
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
clearValueReferenceQueue
|
void clearValueReferenceQueue() {
while (valueReferenceQueue.poll() != null) {}
}
|
Clears all entries from the value reference queue.
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 2,431
|
[] |
void
| true
| 2
| 6.96
|
google/guava
| 51,352
|
javadoc
| false
|
createDocumentBuilderFactory
|
protected DocumentBuilderFactory createDocumentBuilderFactory(int validationMode, boolean namespaceAware)
throws ParserConfigurationException {
// This document loader is used for loading application configuration files.
// As a result, attackers would need complete write access to application configuration
// to leverage XXE attacks. This does not qualify as privilege escalation.
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setNamespaceAware(namespaceAware);
if (validationMode != XmlValidationModeDetector.VALIDATION_NONE) {
factory.setValidating(true);
if (validationMode == XmlValidationModeDetector.VALIDATION_XSD) {
// Enforce namespace aware for XSD...
factory.setNamespaceAware(true);
try {
factory.setAttribute(SCHEMA_LANGUAGE_ATTRIBUTE, XSD_SCHEMA_LANGUAGE);
}
catch (IllegalArgumentException ex) {
ParserConfigurationException pcex = new ParserConfigurationException(
"Unable to validate using XSD: Your JAXP provider [" + factory +
"] does not support XML Schema. Are you running on Java 1.4 with Apache Crimson? " +
"Upgrade to Apache Xerces (or Java 1.5) for full XSD support.");
pcex.initCause(ex);
throw pcex;
}
}
}
return factory;
}
|
Create the {@link DocumentBuilderFactory} instance.
@param validationMode the type of validation: {@link XmlValidationModeDetector#VALIDATION_DTD DTD}
or {@link XmlValidationModeDetector#VALIDATION_XSD XSD})
@param namespaceAware whether the returned factory is to provide support for XML namespaces
@return the JAXP DocumentBuilderFactory
@throws ParserConfigurationException if we failed to build a proper DocumentBuilderFactory
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/DefaultDocumentLoader.java
| 88
|
[
"validationMode",
"namespaceAware"
] |
DocumentBuilderFactory
| true
| 4
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
nansum
|
def nansum(
values: np.ndarray,
*,
axis: AxisInt | None = None,
skipna: bool = True,
min_count: int = 0,
mask: npt.NDArray[np.bool_] | None = None,
) -> npt.NDArray[np.floating] | float | NaTType:
"""
Sum the elements along an axis ignoring NaNs
Parameters
----------
values : ndarray[dtype]
axis : int, optional
skipna : bool, default True
min_count: int, default 0
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : dtype
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nansum(s.values)
np.float64(3.0)
"""
dtype = values.dtype
values, mask = _get_values(values, skipna, fill_value=0, mask=mask)
dtype_sum = _get_dtype_max(dtype)
if dtype.kind == "f":
dtype_sum = dtype
elif dtype.kind == "m":
dtype_sum = np.dtype(np.float64)
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count)
return the_sum
|
Sum the elements along an axis ignoring NaNs
Parameters
----------
values : ndarray[dtype]
axis : int, optional
skipna : bool, default True
min_count: int, default 0
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : dtype
Examples
--------
>>> from pandas.core import nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nansum(s.values)
np.float64(3.0)
|
python
|
pandas/core/nanops.py
| 599
|
[
"values",
"axis",
"skipna",
"min_count",
"mask"
] |
npt.NDArray[np.floating] | float | NaTType
| true
| 3
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
poly2cheb
|
def poly2cheb(pol):
"""
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x')
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., ...
>>> P.chebyshev.poly2cheb(range(4))
array([1. , 3.25, 1. , 0.75])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = chebadd(chebmulx(res), pol[i])
return res
|
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x')
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., ...
>>> P.chebyshev.poly2cheb(range(4))
array([1. , 3.25, 1. , 0.75])
|
python
|
numpy/polynomial/chebyshev.py
| 345
|
[
"pol"
] | false
| 2
| 7.36
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
fix
|
def fix(x, out=None):
"""
Round to nearest integer towards zero.
Round an array of floats element-wise to nearest integer towards zero.
The rounded values have the same data-type as the input.
Parameters
----------
x : array_like
An array to be rounded
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that the input broadcasts to. If not provided or None, a
freshly-allocated array is returned.
Returns
-------
out : ndarray of floats
An array with the same dimensions and data-type as the input.
If second argument is not supplied then a new array is returned
with the rounded values.
If a second argument is supplied the result is stored there.
The return value ``out`` is then a reference to that array.
See Also
--------
rint, trunc, floor, ceil
around : Round to given number of decimals
Examples
--------
>>> import numpy as np
>>> np.fix(3.14)
3.0
>>> np.fix(3)
3
>>> np.fix([2.1, 2.9, -2.1, -2.9])
array([ 2., 2., -2., -2.])
"""
return nx.trunc(x, out=out)
|
Round to nearest integer towards zero.
Round an array of floats element-wise to nearest integer towards zero.
The rounded values have the same data-type as the input.
Parameters
----------
x : array_like
An array to be rounded
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that the input broadcasts to. If not provided or None, a
freshly-allocated array is returned.
Returns
-------
out : ndarray of floats
An array with the same dimensions and data-type as the input.
If second argument is not supplied then a new array is returned
with the rounded values.
If a second argument is supplied the result is stored there.
The return value ``out`` is then a reference to that array.
See Also
--------
rint, trunc, floor, ceil
around : Round to given number of decimals
Examples
--------
>>> import numpy as np
>>> np.fix(3.14)
3.0
>>> np.fix(3)
3
>>> np.fix([2.1, 2.9, -2.1, -2.9])
array([ 2., 2., -2., -2.])
|
python
|
numpy/lib/_ufunclike_impl.py
| 17
|
[
"x",
"out"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
entrySet
|
@Override
public Set<Entry<E>> entrySet() {
Set<Entry<E>> result = entrySet;
if (result == null) {
entrySet = result = createEntrySet();
}
return result;
}
|
Returns this multiset's entry set, lazily creating it via {@link #createEntrySet()} on first
access and caching it for subsequent calls.
|
java
|
android/guava/src/com/google/common/collect/AbstractMultiset.java
| 164
|
[] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
replaceIn
|
public boolean replaceIn(final StringBuffer source) {
if (source == null) {
return false;
}
return replaceIn(source, 0, source.length());
}
|
Replaces all the occurrences of variables within the given source buffer
with their matching values from the resolver.
The buffer is updated with the result.
@param source the buffer to replace in, updated, null returns false.
@return true if altered.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrSubstitutor.java
| 764
|
[
"source"
] | true
| 2
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
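A usage sketch for the in-place replacement above, assuming the (deprecated) commons-lang text package; the variable map is hypothetical:

import java.util.Map;
import org.apache.commons.lang3.text.StrSubstitutor;

public class ReplaceInDemo {
    public static void main(String[] args) {
        StrSubstitutor sub = new StrSubstitutor(Map.of("name", "world"));
        StringBuffer buf = new StringBuffer("hello ${name}");
        boolean altered = sub.replaceIn(buf);     // buffer is updated in place
        System.out.println(altered + ": " + buf); // true: hello world
    }
}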
fromStoreValue
|
protected @Nullable Object fromStoreValue(@Nullable Object storeValue) {
if (this.allowNullValues && storeValue == NullValue.INSTANCE) {
return null;
}
return storeValue;
}
|
Convert the given value from the internal store to a user value
returned from the get method (adapting {@code null}).
@param storeValue the store value
@return the value to return to the user
|
java
|
spring-context/src/main/java/org/springframework/cache/support/AbstractValueAdaptingCache.java
| 86
|
[
"storeValue"
] |
Object
| true
| 3
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fchmod
|
function fchmod(fd, mode, callback) {
mode = parseFileMode(mode, 'mode');
callback = makeCallback(callback);
if (permission.isEnabled()) {
callback(new ERR_ACCESS_DENIED('fchmod API is disabled when Permission Model is enabled.'));
return;
}
const req = new FSReqCallback();
req.oncomplete = callback;
binding.fchmod(fd, mode, req);
}
|
Sets the permissions on the file.
@param {number} fd
@param {string | number} mode
@param {(err?: Error) => any} callback
@returns {void}
|
javascript
|
lib/fs.js
| 1,929
|
[
"fd",
"mode",
"callback"
] | false
| 2
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
from_envvar
|
def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to ``True`` if you want silent failure for missing
files.
:return: ``True`` if the file was loaded successfully.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError(
f"The environment variable {variable_name!r} is not set"
" and as such configuration could not be loaded. Set"
" this variable and make it point to a configuration"
" file"
)
return self.from_pyfile(rv, silent=silent)
|
Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to ``True`` if you want silent failure for missing
files.
:return: ``True`` if the file was loaded successfully.
|
python
|
src/flask/config.py
| 102
|
[
"self",
"variable_name",
"silent"
] |
bool
| true
| 3
| 7.44
|
pallets/flask
| 70,946
|
sphinx
| false
|
nullCheckedList
|
private static <E> List<E> nullCheckedList(Object... array) {
for (int i = 0, len = array.length; i < len; i++) {
if (array[i] == null) {
throw new NullPointerException("at index " + i);
}
}
@SuppressWarnings("unchecked")
E[] castedArray = (E[]) array;
return Arrays.asList(castedArray);
}
|
Views the array as an immutable list, throwing a {@link NullPointerException} that names the
offending index if any element is null. The array must have only {@code E} elements and must be
internally created.
|
java
|
guava-gwt/src-super/com/google/common/collect/super/com/google/common/collect/ImmutableList.java
| 213
|
[] | true
| 3
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
abbreviate
|
public static String abbreviate(final String str, final int offset, final int maxWidth) {
return abbreviate(str, ELLIPSIS3, offset, maxWidth);
}
|
Abbreviates a String using ellipses. This will convert "Now is the time for all good men" into "...is the time for...".
<p>
Works like {@code abbreviate(String, int)}, but allows you to specify a "left edge" offset. Note that this left edge is not necessarily going to be the
leftmost character in the result, or the first character following the ellipses, but it will appear somewhere in the result.
</p>
<p>
In no case will it return a String of length greater than {@code maxWidth}.
</p>
<pre>
StringUtils.abbreviate(null, *, *) = null
StringUtils.abbreviate("", 0, 4) = ""
StringUtils.abbreviate("abcdefghijklmno", -1, 10) = "abcdefg..."
StringUtils.abbreviate("abcdefghijklmno", 0, 10) = "abcdefg..."
StringUtils.abbreviate("abcdefghijklmno", 1, 10) = "abcdefg..."
StringUtils.abbreviate("abcdefghijklmno", 4, 10) = "abcdefg..."
StringUtils.abbreviate("abcdefghijklmno", 5, 10) = "...fghi..."
StringUtils.abbreviate("abcdefghijklmno", 6, 10) = "...ghij..."
StringUtils.abbreviate("abcdefghijklmno", 8, 10) = "...ijklmno"
StringUtils.abbreviate("abcdefghijklmno", 10, 10) = "...ijklmno"
StringUtils.abbreviate("abcdefghijklmno", 12, 10) = "...ijklmno"
StringUtils.abbreviate("abcdefghij", 0, 3) = Throws {@link IllegalArgumentException}.
StringUtils.abbreviate("abcdefghij", 5, 6) = Throws {@link IllegalArgumentException}.
</pre>
@param str the String to check, may be null.
@param offset left edge of source String.
@param maxWidth maximum length of result String, must be at least 4.
@return abbreviated String, {@code null} if null String input.
@throws IllegalArgumentException if the width is too small.
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 272
|
[
"str",
"offset",
"maxWidth"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toLength
|
function toLength(value) {
return value ? baseClamp(toInteger(value), 0, MAX_ARRAY_LENGTH) : 0;
}
|
Converts `value` to an integer suitable for use as the length of an
array-like object.
**Note:** This method is based on
[`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength).
@static
@memberOf _
@since 4.0.0
@category Lang
@param {*} value The value to convert.
@returns {number} Returns the converted integer.
@example
_.toLength(3.2);
// => 3
_.toLength(Number.MIN_VALUE);
// => 0
_.toLength(Infinity);
// => 4294967295
_.toLength('3.2');
// => 3
|
javascript
|
lodash.js
| 12,537
|
[
"value"
] | false
| 2
| 6.96
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
pop_header_name
|
def pop_header_name(
row: list[Hashable], index_col: int | Sequence[int]
) -> tuple[Hashable | None, list[Hashable]]:
"""
Pop the header name for MultiIndex parsing.
Parameters
----------
row : list
The data row to parse for the header name.
index_col : int, list
The index columns for our data. Assumed to be non-null.
Returns
-------
header_name : str
The extracted header name.
trimmed_row : list
The original data row with the header name removed.
"""
# Pop out header name and fill w/blank.
if is_list_like(index_col):
assert isinstance(index_col, Iterable)
i = max(index_col)
else:
assert not isinstance(index_col, Iterable)
i = index_col
header_name = row[i]
header_name = None if header_name == "" else header_name
return header_name, row[:i] + [""] + row[i + 1 :]
|
Pop the header name for MultiIndex parsing.
Parameters
----------
row : list
The data row to parse for the header name.
index_col : int, list
The index columns for our data. Assumed to be non-null.
Returns
-------
header_name : str
The extracted header name.
trimmed_row : list
The original data row with the header name removed.
|
python
|
pandas/io/excel/_util.py
| 270
|
[
"row",
"index_col"
] |
tuple[Hashable | None, list[Hashable]]
| true
| 4
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
readSchemaFromFileOrDirectory
|
async function readSchemaFromFileOrDirectory(schemaPath: string): Promise<LookupResult> {
let stats: fs.Stats
try {
stats = await stat(schemaPath)
} catch (e) {
if (e.code === 'ENOENT') {
return { ok: false, error: { kind: 'NotFound', path: schemaPath } }
}
throw e
}
if (stats.isFile()) {
return readSchemaFromSingleFile(schemaPath)
}
if (stats.isDirectory()) {
return readSchemaFromDirectory(schemaPath)
}
return { ok: false, error: { kind: 'WrongType', path: schemaPath, expectedTypes: ['file', 'directory'] } }
}
|
Reads the schema from the given path, which may be either a single file or a directory.
Returns a LookupResult describing success, or the failure reason (the path was not found,
or it is neither a file nor a directory).
@param schemaPath path to the schema file or directory
@returns the lookup result
|
typescript
|
packages/internals/src/cli/getSchema.ts
| 140
|
[
"schemaPath"
] | true
| 5
| 7.44
|
prisma/prisma
| 44,834
|
jsdoc
| true
|
|
getAndIncrement
|
public int getAndIncrement() {
final int last = value;
value++;
return last;
}
|
Increments this instance's value by 1; this method returns the value associated with the instance
immediately prior to the increment operation. This method is not thread safe.
@return the value associated with the instance before it was incremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableInt.java
| 247
|
[] | true
| 1
| 6.88
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
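A brief sketch of the get-then-increment ordering, assuming commons-lang's MutableInt:

import org.apache.commons.lang3.mutable.MutableInt;

public class GetAndIncrementDemo {
    public static void main(String[] args) {
        MutableInt counter = new MutableInt(5);
        System.out.println(counter.getAndIncrement()); // 5: the value before the increment
        System.out.println(counter.intValue());        // 6
    }
}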
parseDateWithLeniency
|
private static Date parseDateWithLeniency(final String dateStr, final Locale locale, final String[] parsePatterns,
final boolean lenient) throws ParseException {
Objects.requireNonNull(dateStr, "str");
Objects.requireNonNull(parsePatterns, "parsePatterns");
final TimeZone tz = TimeZone.getDefault();
final Locale lcl = LocaleUtils.toLocale(locale);
final ParsePosition pos = new ParsePosition(0);
final Calendar calendar = Calendar.getInstance(tz, lcl);
calendar.setLenient(lenient);
for (final String parsePattern : parsePatterns) {
final FastDateParser fdp = new FastDateParser(parsePattern, tz, lcl);
calendar.clear();
try {
if (fdp.parse(dateStr, pos, calendar) && pos.getIndex() == dateStr.length()) {
return calendar.getTime();
}
} catch (final IllegalArgumentException ignored) {
// leniency is preventing calendar from being set
}
pos.setIndex(0);
}
throw new ParseException("Unable to parse the date: " + dateStr, -1);
}
|
Parses a string representing a date by trying a variety of different parsers.
<p>The parse will try each parse pattern in turn.
A parse is only deemed successful if it parses the whole of the input string.
If no parse patterns match, a ParseException is thrown.</p>
@param dateStr the date to parse, not null.
@param locale the locale to use when interpreting the pattern, can be null in which
case the default system locale is used.
@param parsePatterns the date format patterns to use, see SimpleDateFormat, not null.
@param lenient Specify whether or not date/time parsing is to be lenient.
@return the parsed date.
@throws NullPointerException if the date string or pattern array is null.
@throws ParseException if none of the date patterns were suitable.
@see java.util.Calendar#isLenient()
|
java
|
src/main/java/org/apache/commons/lang3/time/DateUtils.java
| 1,344
|
[
"dateStr",
"locale",
"parsePatterns",
"lenient"
] |
Date
| true
| 4
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
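parseDateWithLeniency is private; a sketch of the public DateUtils entry points that delegate to it (the patterns are hypothetical):

import java.text.ParseException;
import java.util.Date;
import org.apache.commons.lang3.time.DateUtils;

public class ParseDateDemo {
    public static void main(String[] args) throws ParseException {
        // Each pattern is tried in turn; a parse succeeds only if it consumes the whole input.
        Date lenient = DateUtils.parseDate("2024-05-01", "yyyy-MM-dd", "dd/MM/yyyy");
        System.out.println(lenient);
        // parseDateStrictly routes through the same logic with lenient = false.
        Date strict = DateUtils.parseDateStrictly("01/05/2024", "yyyy-MM-dd", "dd/MM/yyyy");
        System.out.println(strict);
    }
}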
row
|
@Override
public Map<C, @Nullable V> row(R rowKey) {
checkNotNull(rowKey);
Integer rowIndex = rowKeyToIndex.get(rowKey);
if (rowIndex == null) {
return emptyMap();
} else {
return new Row(rowIndex);
}
}
|
Returns a view of all mappings that have the given row key. If the row key isn't in {@link
#rowKeySet()}, an empty immutable map is returned.
<p>Otherwise, for each column key in {@link #columnKeySet()}, the returned map associates the
column key with the corresponding value in the table. Changes to the returned map will update
the underlying table, and vice versa.
@param rowKey key of row to search for in the table
@return the corresponding map from column keys to values
|
java
|
android/guava/src/com/google/common/collect/ArrayTable.java
| 686
|
[
"rowKey"
] | true
| 2
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
|
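A sketch of the row-view semantics documented above, assuming Guava's ArrayTable; the keys are hypothetical:

import com.google.common.collect.ArrayTable;
import java.util.List;
import java.util.Map;

public class ArrayTableRowDemo {
    public static void main(String[] args) {
        ArrayTable<String, String, Integer> table =
                ArrayTable.create(List.of("r1", "r2"), List.of("c1", "c2"));
        table.put("r1", "c1", 42);
        Map<String, Integer> row = table.row("r1"); // live view backed by the table
        System.out.println(row.get("c1"));          // 42
        System.out.println(table.row("missing"));   // {} : the empty immutable map
    }
}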
_parse_json_file
|
def _parse_json_file(file_path: str) -> tuple[dict[str, Any], list[FileSyntaxError]]:
"""
Parse a file in the JSON format.
:param file_path: The location of the file that will be processed.
:return: Tuple with the parsed key/value mapping and a list of syntax errors
"""
with open(file_path) as f:
content = f.read()
if not content:
return {}, [FileSyntaxError(line_no=1, message="The file is empty.")]
try:
secrets = json.loads(content)
except JSONDecodeError as e:
return {}, [FileSyntaxError(line_no=int(e.lineno), message=e.msg)]
if not isinstance(secrets, dict):
return {}, [FileSyntaxError(line_no=1, message="The file should contain the object.")]
return secrets, []
|
Parse a file in the JSON format.
:param file_path: The location of the file that will be processed.
:return: Tuple with the parsed key/value mapping and a list of syntax errors
|
python
|
airflow-core/src/airflow/secrets/local_filesystem.py
| 135
|
[
"file_path"
] |
tuple[dict[str, Any], list[FileSyntaxError]]
| true
| 3
| 8.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
clientId
|
@Override
public String clientId() {
return clientId;
}
|
Returns the client ID of this consumer.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java
| 1,341
|
[] |
String
| true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
Promise
|
Promise(Promise&& other) noexcept
: ct_(std::move(other.ct_)),
state_(std::exchange(other.state_, nullptr)) {}
|
Move-constructs a Promise, taking ownership of the other Promise's state and leaving it empty.
|
cpp
|
folly/coro/Promise.h
| 71
|
[] | true
| 2
| 6.8
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
getOperationContext
|
protected CacheOperationContext getOperationContext(
CacheOperation operation, Method method, @Nullable Object[] args, Object target, Class<?> targetClass) {
CacheOperationMetadata metadata = getCacheOperationMetadata(operation, method, targetClass);
return new CacheOperationContext(metadata, args, target);
}
|
Creates a {@link CacheOperationContext} for the given cache operation and method invocation.
@param operation the cache operation
@param method the method being invoked
@param args the method invocation arguments
@param target the target object
@param targetClass the target class
@return a new operation context backed by the resolved operation metadata
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
| 321
|
[
"operation",
"method",
"args",
"target",
"targetClass"
] |
CacheOperationContext
| true
| 1
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getStrictModeBlockScopeFunctionDeclarationMessage
|
function getStrictModeBlockScopeFunctionDeclarationMessage(node: Node) {
// Provide specialized messages to help the user understand why we think they're in
// strict mode.
if (getContainingClass(node)) {
return Diagnostics.Function_declarations_are_not_allowed_inside_blocks_in_strict_mode_when_targeting_ES5_Class_definitions_are_automatically_in_strict_mode;
}
if (file.externalModuleIndicator) {
return Diagnostics.Function_declarations_are_not_allowed_inside_blocks_in_strict_mode_when_targeting_ES5_Modules_are_automatically_in_strict_mode;
}
return Diagnostics.Function_declarations_are_not_allowed_inside_blocks_in_strict_mode_when_targeting_ES5;
}
|
Returns the diagnostic message explaining why a block-scoped function declaration is not allowed
in strict mode, specialized to help the user understand why strict mode applies (inside a class
definition, inside an external module, or otherwise).
@param node - The function declaration node being reported.
|
typescript
|
src/compiler/binder.ts
| 2,680
|
[
"node"
] | false
| 3
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
dehexchar
|
public static int dehexchar(char hex) {
if (hex >= '0' && hex <= '9') {
return hex - '0';
}
else if (hex >= 'A' && hex <= 'F') {
return hex - 'A' + 10;
}
else if (hex >= 'a' && hex <= 'f') {
return hex - 'a' + 10;
}
else {
return -1;
}
}
|
Returns the numeric value (0-15) of the given hexadecimal character, or -1 if it is not a hex digit.
@param hex the character to decode.
@return the decoded value, or -1 if the character is not a hexadecimal digit.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
| 540
|
[
"hex"
] | true
| 7
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
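The decoding logic above is self-contained; an equivalent standalone sketch:

public class DehexcharDemo {
    static int dehexchar(char hex) {
        if (hex >= '0' && hex <= '9') return hex - '0';
        if (hex >= 'A' && hex <= 'F') return hex - 'A' + 10;
        if (hex >= 'a' && hex <= 'f') return hex - 'a' + 10;
        return -1;
    }

    public static void main(String[] args) {
        System.out.println(dehexchar('b')); // 11
        System.out.println(dehexchar('G')); // -1: not a hex digit
    }
}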
factoriesAndBeans
|
public static Loader factoriesAndBeans(ListableBeanFactory beanFactory) {
ClassLoader classLoader = (beanFactory instanceof ConfigurableBeanFactory configurableBeanFactory ?
configurableBeanFactory.getBeanClassLoader() : null);
return factoriesAndBeans(getSpringFactoriesLoader(classLoader), beanFactory);
}
|
Create a new {@link Loader} that will obtain AOT services from
{@value #FACTORIES_RESOURCE_LOCATION} as well as the given
{@link ListableBeanFactory}.
@param beanFactory the bean factory
@return a new {@link Loader} instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AotServices.java
| 119
|
[
"beanFactory"
] |
Loader
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
parsePossibleParenthesizedArrowFunctionExpression
|
function parsePossibleParenthesizedArrowFunctionExpression(allowReturnTypeInArrowFunction: boolean): ArrowFunction | undefined {
const tokenPos = scanner.getTokenStart();
if (notParenthesizedArrow?.has(tokenPos)) {
return undefined;
}
const result = parseParenthesizedArrowFunctionExpression(/*allowAmbiguity*/ false, allowReturnTypeInArrowFunction);
if (!result) {
(notParenthesizedArrow || (notParenthesizedArrow = new Set())).add(tokenPos);
}
return result;
}
|
Attempts to parse a parenthesized arrow function expression at the current token, remembering
token positions that are known not to begin one so they are not re-parsed on later attempts.
@param allowReturnTypeInArrowFunction Whether a return type annotation is permitted on the arrow function.
|
typescript
|
src/compiler/parser.ts
| 5,381
|
[
"allowReturnTypeInArrowFunction"
] | true
| 4
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
invokeBeanDefiningClosure
|
protected GroovyBeanDefinitionReader invokeBeanDefiningClosure(Closure<?> callable) {
callable.setDelegate(this);
callable.call();
finalizeDeferredProperties();
return this;
}
|
When a method argument is only a closure it is a set of bean definitions.
@param callable the closure argument
@return this {@code GroovyBeanDefinitionReader} instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/groovy/GroovyBeanDefinitionReader.java
| 452
|
[
"callable"
] |
GroovyBeanDefinitionReader
| true
| 1
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
bindOrCreate
|
public <T> T bindOrCreate(String name, Class<T> target) {
return bindOrCreate(name, Bindable.of(target));
}
|
Bind the specified target {@link Class} using this binder's
{@link ConfigurationPropertySource property sources} or create a new instance of
the specified target {@link Class} if the result of the binding is {@code null}.
@param name the configuration property name to bind
@param target the target class
@param <T> the bound type
@return the bound or created object
@since 2.2.0
@see #bind(ConfigurationPropertyName, Bindable, BindHandler)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 302
|
[
"name",
"target"
] |
T
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
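A usage sketch for bindOrCreate, assuming a Spring Environment is at hand; the property prefix and target type are hypothetical:

import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.core.env.Environment;

public class BindOrCreateDemo {
    // Binds "app.greeting" if any such properties exist; otherwise creates a default instance.
    static GreetingProperties load(Environment environment) {
        return Binder.get(environment).bindOrCreate("app.greeting", GreetingProperties.class);
    }

    public static class GreetingProperties {
        private String message = "hello";
        public String getMessage() { return message; }
        public void setMessage(String message) { this.message = message; }
    }
}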
_parse_date_columns
|
def _parse_date_columns(data_frame: DataFrame, parse_dates) -> DataFrame:
"""
Force the specified columns to be parsed as datetimes.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for i, (col_name, df_col) in enumerate(data_frame.items()):
if isinstance(df_col.dtype, DatetimeTZDtype) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except (KeyError, TypeError):
fmt = None
data_frame.isetitem(i, _handle_date_column(df_col, format=fmt))
return data_frame
|
Force the specified columns to be parsed as datetimes.
Supports both string formatted and integer timestamp columns.
|
python
|
pandas/io/sql.py
| 139
|
[
"data_frame",
"parse_dates"
] |
DataFrame
| true
| 4
| 6
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
optLong
|
public long optLong(int index) {
return optLong(index, 0L);
}
|
Returns the value at {@code index} if it exists and is a long or can be coerced to
a long. Returns 0 otherwise.
@param index the index to get the value from
@return the {@code value} or {@code 0}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
| 461
|
[
"index"
] | true
| 1
| 6.8
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
createInstance
|
@Override
protected Object createInstance() {
Assert.state(getServiceType() != null, "Property 'serviceType' is required");
return getObjectToExpose(ServiceLoader.load(getServiceType(), this.beanClassLoader));
}
|
Delegates to {@link #getObjectToExpose(java.util.ServiceLoader)}.
@return the object to expose
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/serviceloader/AbstractServiceLoaderBasedFactoryBean.java
| 68
|
[] |
Object
| true
| 1
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
appendCommentRange
|
function appendCommentRange(pos: number, end: number, kind: CommentKind, hasTrailingNewLine: boolean, _state: any, comments: CommentRange[] = []) {
comments.push({ kind, pos, end, hasTrailingNewLine });
return comments;
}
|
Appends a comment range with the given position, end, kind, and trailing-newline flag to the
comments array, creating the array if necessary.
@param pos The start position of the comment.
@param end The end position of the comment.
@param kind The kind of comment (single-line or multi-line).
@param hasTrailingNewLine Whether the comment is followed by a line break.
@param _state Unused iteration state.
@param comments The accumulator array; a new array is created when omitted.
@returns The accumulator array containing the appended comment range.
|
typescript
|
src/compiler/scanner.ts
| 952
|
[
"pos",
"end",
"kind",
"hasTrailingNewLine",
"_state",
"comments"
] | false
| 1
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
get_masked_subclass
|
def get_masked_subclass(*arrays):
"""
Return the youngest subclass of MaskedArray from a list of (masked) arrays.
In case of siblings, the first listed takes over.
"""
if len(arrays) == 1:
arr = arrays[0]
if isinstance(arr, MaskedArray):
rcls = type(arr)
else:
rcls = MaskedArray
else:
arrcls = [type(a) for a in arrays]
rcls = arrcls[0]
if not issubclass(rcls, MaskedArray):
rcls = MaskedArray
for cls in arrcls[1:]:
if issubclass(cls, rcls):
rcls = cls
# Don't return MaskedConstant as result: revert to MaskedArray
if rcls.__name__ == 'MaskedConstant':
return MaskedArray
return rcls
|
Return the youngest subclass of MaskedArray from a list of (masked) arrays.
In case of siblings, the first listed takes over.
|
python
|
numpy/ma/core.py
| 681
|
[] | false
| 9
| 6.08
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
resolveName
|
private void resolveName(ConfigurationMetadataItem item) {
item.setName(item.getId()); // fallback
ConfigurationMetadataSource source = getSource(item);
if (source != null) {
String groupId = source.getGroupId();
String dottedPrefix = groupId + ".";
String id = item.getId();
if (hasLength(groupId) && id.startsWith(dottedPrefix)) {
String name = id.substring(dottedPrefix.length());
item.setName(name);
}
}
}
|
Resolve the name of an item against this instance.
@param item the item to resolve
@see ConfigurationMetadataProperty#setName(String)
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/RawConfigurationMetadata.java
| 74
|
[
"item"
] |
void
| true
| 4
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
parsePKCS8Encrypted
|
private static PrivateKey parsePKCS8Encrypted(BufferedReader bReader, char[] keyPassword) throws IOException, GeneralSecurityException {
StringBuilder sb = new StringBuilder();
String line = bReader.readLine();
while (line != null) {
if (PKCS8_ENCRYPTED_FOOTER.equals(line.trim())) {
break;
}
sb.append(line.trim());
line = bReader.readLine();
}
if (null == line || PKCS8_ENCRYPTED_FOOTER.equals(line.trim()) == false) {
throw new IOException("Malformed PEM file, PEM footer is invalid or missing");
}
byte[] keyBytes = Base64.getDecoder().decode(sb.toString());
final EncryptedPrivateKeyInfo encryptedPrivateKeyInfo = getEncryptedPrivateKeyInfo(keyBytes);
String algorithm = encryptedPrivateKeyInfo.getAlgName();
if (algorithm.equals("PBES2") || algorithm.equals("1.2.840.113549.1.5.13")) {
algorithm = getPBES2Algorithm(encryptedPrivateKeyInfo);
}
SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(algorithm);
SecretKey secretKey = secretKeyFactory.generateSecret(new PBEKeySpec(keyPassword));
Cipher cipher = Cipher.getInstance(algorithm);
cipher.init(Cipher.DECRYPT_MODE, secretKey, encryptedPrivateKeyInfo.getAlgParameters());
PKCS8EncodedKeySpec keySpec = encryptedPrivateKeyInfo.getKeySpec(cipher);
String keyAlgo = getKeyAlgorithmIdentifier(keySpec.getEncoded());
KeyFactory keyFactory = KeyFactory.getInstance(keyAlgo);
return keyFactory.generatePrivate(keySpec);
}
|
Creates a {@link PrivateKey} from the contents of {@code bReader} that contains an encrypted private key encoded in
PKCS#8
@param bReader the {@link BufferedReader} containing the key file contents
@param keyPassword The password for the encrypted (password protected) key
@return {@link PrivateKey}
@throws IOException if the file can't be read
@throws GeneralSecurityException if the private key can't be generated from the {@link PKCS8EncodedKeySpec}
|
java
|
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java
| 378
|
[
"bReader",
"keyPassword"
] |
PrivateKey
| true
| 7
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
hasParser
|
public boolean hasParser(Class<?> categoryClass, String name, RestApiVersion apiVersion) {
final Map<Class<?>, Map<String, Entry>> versionMap = registry.get(apiVersion);
if (versionMap == null) {
return false;
}
final Map<String, Entry> parsers = versionMap.get(categoryClass);
return parsers != null && parsers.containsKey(name);
}
|
Returns {@code true} if this registry is able to {@link #parseNamedObject parse} the referenced object, false otherwise.
Note: This method does not throw exceptions, even if the {@link RestApiVersion} or {@code categoryClass} are unknown.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/NamedXContentRegistry.java
| 156
|
[
"categoryClass",
"name",
"apiVersion"
] | true
| 3
| 6.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
get_event_buffer
|
def get_event_buffer(self, dag_ids=None) -> dict[TaskInstanceKey, EventBufferValueType]:
"""
Return and flush the event buffer.
In case dag_ids is specified it will only return and flush events
for the given dag_ids. Otherwise, it returns and flushes all events.
:param dag_ids: the dag_ids to return events for; returns all if given ``None``.
:return: a dict of events
"""
cleared_events: dict[TaskInstanceKey, EventBufferValueType] = {}
if dag_ids is None:
cleared_events = self.event_buffer
self.event_buffer = {}
else:
for ti_key in list(self.event_buffer.keys()):
if ti_key.dag_id in dag_ids:
cleared_events[ti_key] = self.event_buffer.pop(ti_key)
return cleared_events
|
Return and flush the event buffer.
In case dag_ids is specified it will only return and flush events
for the given dag_ids. Otherwise, it returns and flushes all events.
:param dag_ids: the dag_ids to return events for; returns all if given ``None``.
:return: a dict of events
|
python
|
airflow-core/src/airflow/executors/base_executor.py
| 494
|
[
"self",
"dag_ids"
] |
dict[TaskInstanceKey, EventBufferValueType]
| true
| 5
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
containsIgnoreCase
|
@Deprecated
public static boolean containsIgnoreCase(final CharSequence str, final CharSequence searchStr) {
return Strings.CI.contains(str, searchStr);
}
|
Tests if CharSequence contains a search CharSequence irrespective of case, handling {@code null}. Case-insensitivity is defined as by
{@link String#equalsIgnoreCase(String)}.
<p>
A {@code null} CharSequence will return {@code false}.
</p>
<pre>
StringUtils.containsIgnoreCase(null, *) = false
StringUtils.containsIgnoreCase(*, null) = false
StringUtils.containsIgnoreCase("", "") = true
StringUtils.containsIgnoreCase("abc", "") = true
StringUtils.containsIgnoreCase("abc", "a") = true
StringUtils.containsIgnoreCase("abc", "z") = false
StringUtils.containsIgnoreCase("abc", "A") = true
StringUtils.containsIgnoreCase("abc", "Z") = false
</pre>
@param str the CharSequence to check, may be null.
@param searchStr the CharSequence to find, may be null.
@return true if the CharSequence contains the search CharSequence irrespective of case or false if not or {@code null} string input.
@since 3.0 Changed signature from containsIgnoreCase(String, String) to containsIgnoreCase(CharSequence, CharSequence).
@deprecated Use {@link Strings#contains(CharSequence, CharSequence) Strings.CI.contains(CharSequence, CharSequence)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,187
|
[
"str",
"searchStr"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getErrorMessage
|
private String getErrorMessage(Integer partitionsCount, String topic, Integer partition, long maxWaitMs) {
return partitionsCount == null ?
String.format("Topic %s not present in metadata after %d ms.",
topic, maxWaitMs) :
String.format("Partition %d of topic %s with partition count %d is not present in metadata after %d ms.",
partition, topic, partitionsCount, maxWaitMs);
}
|
Builds the error message for a metadata timeout.
@param partitionsCount The known partition count for the topic, or null if the topic is not present in metadata
@param topic The topic we wanted metadata for
@param partition The specific partition expected to exist in metadata, or null if there was no preference
@param maxWaitMs The maximum time in ms that was spent waiting on the metadata
@return A message stating either that the topic, or the given partition of it, was not present in metadata after the wait
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
| 1,271
|
[
"partitionsCount",
"topic",
"partition",
"maxWaitMs"
] |
String
| true
| 2
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
toHttpCacheControl
|
public @Nullable CacheControl toHttpCacheControl() {
PropertyMapper map = PropertyMapper.get();
CacheControl control = createCacheControl();
map.from(this::getMustRevalidate).whenTrue().toCall(control::mustRevalidate);
map.from(this::getNoTransform).whenTrue().toCall(control::noTransform);
map.from(this::getCachePublic).whenTrue().toCall(control::cachePublic);
map.from(this::getCachePrivate).whenTrue().toCall(control::cachePrivate);
map.from(this::getProxyRevalidate).whenTrue().toCall(control::proxyRevalidate);
map.from(this::getStaleWhileRevalidate)
.to((duration) -> control.staleWhileRevalidate(duration.getSeconds(), TimeUnit.SECONDS));
map.from(this::getStaleIfError)
.to((duration) -> control.staleIfError(duration.getSeconds(), TimeUnit.SECONDS));
map.from(this::getSMaxAge)
.to((duration) -> control.sMaxAge(duration.getSeconds(), TimeUnit.SECONDS));
// check if cacheControl remained untouched
if (control.getHeaderValue() == null) {
return null;
}
return control;
}
|
Creates the equivalent Spring {@link CacheControl} from these properties, mapping each configured
directive onto it.
@return the cache control, or {@code null} if no directive was set
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/WebProperties.java
| 578
|
[] |
CacheControl
| true
| 2
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
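For reference, the Spring CacheControl that the method assembles via PropertyMapper can also be built directly; a hedged sketch of equivalent directives:

import java.time.Duration;
import java.util.concurrent.TimeUnit;
import org.springframework.http.CacheControl;

public class CacheControlDemo {
    public static void main(String[] args) {
        CacheControl control = CacheControl.maxAge(Duration.ofHours(1))
                .cachePublic()
                .staleWhileRevalidate(30, TimeUnit.SECONDS);
        // A null header value means no directive was set, mirroring the null return above.
        System.out.println(control.getHeaderValue()); // max-age=3600, public, stale-while-revalidate=30
    }
}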