Dataset schema (column types and value ranges, as reported by the dataset viewer):

| Column | Type | Range / values |
|---|---|---|
| function_name | string | lengths 1–57 |
| function_code | string | lengths 20–4.99k |
| documentation | string | lengths 50–2k |
| language | string | 5 classes |
| file_path | string | lengths 8–166 |
| line_number | int32 | 4–16.7k |
| parameters | list | lengths 0–20 |
| return_type | string | lengths 0–131 |
| has_type_hints | bool | 2 classes |
| complexity | int32 | 1–51 |
| quality_score | float32 | 6–9.68 |
| repo_name | string | 34 classes |
| repo_stars | int32 | 2.9k–242k |
| docstring_style | string | 7 classes |
| is_async | bool | 2 classes |

Sample rows (one record per function; fields appear in the column order above):
get_loc
|
def get_loc(self, key) -> int:
"""
Get integer location for requested label.
Parameters
----------
key : int or float
Label to locate. Integer-like floats (e.g. 3.0) are accepted and
treated as the corresponding integer. Non-integer floats and other
non-integer labels are not valid and will raise KeyError or
InvalidIndexError.
Returns
-------
int
Integer location of the label within the RangeIndex.
Raises
------
KeyError
If the label is not present in the RangeIndex or the label is a
non-integer value.
InvalidIndexError
If the label is of an invalid type for the RangeIndex.
See Also
--------
RangeIndex.get_slice_bound : Calculate slice bound that corresponds to
given label.
RangeIndex.get_indexer : Computes indexer and mask for new index given
the current index.
RangeIndex.get_non_unique : Returns indexer and masks for new index given
the current index.
RangeIndex.get_indexer_for : Returns an indexer even when non-unique.
Examples
--------
>>> idx = pd.RangeIndex(5)
>>> idx.get_loc(3)
3
>>> idx = pd.RangeIndex(2, 10, 2) # values [2, 4, 6, 8]
>>> idx.get_loc(6)
2
"""
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
if isinstance(key, Hashable):
raise KeyError(key)
self._check_indexing_error(key)
raise KeyError(key)
|
Get integer location for requested label.
Parameters
----------
key : int or float
Label to locate. Integer-like floats (e.g. 3.0) are accepted and
treated as the corresponding integer. Non-integer floats and other
non-integer labels are not valid and will raise KeyError or
InvalidIndexError.
Returns
-------
int
Integer location of the label within the RangeIndex.
Raises
------
KeyError
If the label is not present in the RangeIndex or the label is a
non-integer value.
InvalidIndexError
If the label is of an invalid type for the RangeIndex.
See Also
--------
RangeIndex.get_slice_bound : Calculate slice bound that corresponds to
given label.
RangeIndex.get_indexer : Computes indexer and mask for new index given
the current index.
RangeIndex.get_non_unique : Returns indexer and masks for new index given
the current index.
RangeIndex.get_indexer_for : Returns an indexer even when non-unique.
Examples
--------
>>> idx = pd.RangeIndex(5)
>>> idx.get_loc(3)
3
>>> idx = pd.RangeIndex(2, 10, 2) # values [2, 4, 6, 8]
>>> idx.get_loc(6)
2
|
python
|
pandas/core/indexes/range.py
| 473
|
[
"self",
"key"
] |
int
| true
| 5
| 8.32
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
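A minimal sketch of the float-handling behavior documented in this row, assuming a recent pandas release with the `RangeIndex.get_loc` implementation shown above:

```python
import pandas as pd

idx = pd.RangeIndex(5)

# Integer-like floats are coerced to int before the lookup.
assert idx.get_loc(3.0) == 3

# Non-integer floats are hashable but not integer-like, so they raise KeyError.
try:
    idx.get_loc(3.5)
except KeyError:
    print("3.5 is not a valid label")
```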
has_crawler
|
def has_crawler(self, crawler_name) -> bool:
"""
Check if the crawler already exists.
:param crawler_name: unique crawler name per AWS account
:return: Returns True if the crawler already exists and False if not.
"""
self.log.info("Checking if crawler already exists: %s", crawler_name)
try:
self.get_crawler(crawler_name)
return True
except self.glue_client.exceptions.EntityNotFoundException:
return False
|
Check if the crawler already exists.
:param crawler_name: unique crawler name per AWS account
:return: Returns True if the crawler already exists and False if not.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_crawler.py
| 50
|
[
"self",
"crawler_name"
] |
bool
| true
| 1
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
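A hedged usage sketch for the hook above; the connection id and crawler name are placeholders, and the call needs the `amazon` provider package plus valid AWS credentials:

```python
from airflow.providers.amazon.aws.hooks.glue_crawler import GlueCrawlerHook

# Hypothetical connection id and crawler name; replace with your own.
hook = GlueCrawlerHook(aws_conn_id="aws_default")
if not hook.has_crawler("my-etl-crawler"):
    print("Crawler does not exist yet; safe to create it.")
```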
asarray
|
def asarray(obj, itemsize=None, unicode=None, order=None):
"""
Convert the input to a `~numpy.char.chararray`, copying the data only if
necessary.
Versus a NumPy array of dtype `bytes_` or `str_`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
(e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
and infix operators (e.g. ``+``, ``*``, ``%``)
Parameters
----------
obj : array of str or unicode-like
itemsize : int, optional
`itemsize` is the number of characters per scalar in the
resulting array. If `itemsize` is None, and `obj` is an
object array or a Python list, the `itemsize` will be
automatically determined. If `itemsize` is provided and `obj`
is of type str or unicode, then the `obj` string will be
chunked into `itemsize` pieces.
unicode : bool, optional
When true, the resulting `~numpy.char.chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
None and `obj` is one of the following:
- a `~numpy.char.chararray`,
- an ndarray of type `str_` or `unicode_`
- a Python str or unicode object,
then the unicode setting of the output array will be
automatically determined.
order : {'C', 'F'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest).
Examples
--------
>>> import numpy as np
>>> np.char.asarray(['hello', 'world'])
chararray(['hello', 'world'], dtype='<U5')
"""
return array(obj, itemsize, copy=False,
unicode=unicode, order=order)
|
Convert the input to a `~numpy.char.chararray`, copying the data only if
necessary.
Versus a NumPy array of dtype `bytes_` or `str_`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
(e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
and infix operators (e.g. ``+``, ``*``, ``%``)
Parameters
----------
obj : array of str or unicode-like
itemsize : int, optional
`itemsize` is the number of characters per scalar in the
resulting array. If `itemsize` is None, and `obj` is an
object array or a Python list, the `itemsize` will be
automatically determined. If `itemsize` is provided and `obj`
is of type str or unicode, then the `obj` string will be
chunked into `itemsize` pieces.
unicode : bool, optional
When true, the resulting `~numpy.char.chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
None and `obj` is one of the following:
- a `~numpy.char.chararray`,
- an ndarray of type `str_` or `unicode_`
- a Python str or unicode object,
then the unicode setting of the output array will be
automatically determined.
order : {'C', 'F'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest).
Examples
--------
>>> import numpy as np
>>> np.char.asarray(['hello', 'world'])
chararray(['hello', 'world'], dtype='<U5')
|
python
|
numpy/_core/defchararray.py
| 1,357
|
[
"obj",
"itemsize",
"unicode",
"order"
] | false
| 1
| 6.32
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
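The `itemsize` chunking and whitespace-stripping behavior described above are easiest to see on small inputs; a short sketch, with expected outputs shown as comments:

```python
import numpy as np

# A str input with an explicit itemsize is split into itemsize-sized pieces.
a = np.char.asarray("abcdef", itemsize=2)
print(a)  # chararray(['ab', 'cd', 'ef'], dtype='<U2')

# Trailing whitespace is removed when elements are indexed or compared.
b = np.char.asarray(["hi ", "yo"])
print(b[0] == "hi")  # True
```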
resolve
|
@Override
public @Nullable NamespaceHandler resolve(String namespaceUri) {
Map<String, Object> handlerMappings = getHandlerMappings();
Object handlerOrClassName = handlerMappings.get(namespaceUri);
if (handlerOrClassName == null) {
return null;
}
else if (handlerOrClassName instanceof NamespaceHandler namespaceHandler) {
return namespaceHandler;
}
else {
String className = (String) handlerOrClassName;
try {
Class<?> handlerClass = ClassUtils.forName(className, this.classLoader);
if (!NamespaceHandler.class.isAssignableFrom(handlerClass)) {
throw new FatalBeanException("Class [" + className + "] for namespace [" + namespaceUri +
"] does not implement the [" + NamespaceHandler.class.getName() + "] interface");
}
NamespaceHandler namespaceHandler = (NamespaceHandler) BeanUtils.instantiateClass(handlerClass);
namespaceHandler.init();
handlerMappings.put(namespaceUri, namespaceHandler);
return namespaceHandler;
}
catch (ClassNotFoundException ex) {
throw new FatalBeanException("Could not find NamespaceHandler class [" + className +
"] for namespace [" + namespaceUri + "]", ex);
}
catch (LinkageError err) {
throw new FatalBeanException("Unresolvable class definition for NamespaceHandler class [" +
className + "] for namespace [" + namespaceUri + "]", err);
}
}
}
|
Locate the {@link NamespaceHandler} for the supplied namespace URI
from the configured mappings.
@param namespaceUri the relevant namespace URI
@return the located {@link NamespaceHandler}, or {@code null} if none found
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/DefaultNamespaceHandlerResolver.java
| 113
|
[
"namespaceUri"
] |
NamespaceHandler
| true
| 6
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
addAndGet
|
public float addAndGet(final Number operand) {
this.value += operand.floatValue();
return value;
}
|
Increments this instance's value by {@code operand}; this method returns the value associated with the instance
immediately after the addition operation. This method is not thread safe.
@param operand the quantity to add, not null.
@throws NullPointerException if {@code operand} is null.
@return the value associated with this instance after adding the operand.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableFloat.java
| 127
|
[
"operand"
] | true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
merge
|
Object merge(@Nullable Object parent);
|
Merge the current value set with that of the supplied object.
<p>The supplied object is considered the parent, and values in
the callee's value set must override those of the supplied object.
@param parent the object to merge with
@return the result of the merge operation
@throws IllegalArgumentException if the supplied parent is {@code null}
@throws IllegalStateException if merging is not enabled for this instance
(i.e. {@code mergeEnabled} equals {@code false}).
|
java
|
spring-beans/src/main/java/org/springframework/beans/Mergeable.java
| 49
|
[
"parent"
] |
Object
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getJSONArray
|
public JSONArray getJSONArray(String name) throws JSONException {
Object object = get(name);
if (object instanceof JSONArray) {
return (JSONArray) object;
}
else {
throw JSON.typeMismatch(name, object, "JSONArray");
}
}
|
Returns the value mapped by {@code name} if it exists and is a {@code
JSONArray}.
@param name the name of the property
@return the value
@throws JSONException if the mapping doesn't exist or is not a {@code
JSONArray}.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 596
|
[
"name"
] |
JSONArray
| true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
insert
|
def insert(self, loc: int, item) -> Self:
"""
Insert an item at the given position.
Parameters
----------
loc : int
Index where the `item` needs to be inserted.
item : scalar-like
Value to be inserted.
Returns
-------
ExtensionArray
With `item` inserted at `loc`.
See Also
--------
Index.insert: Make new Index inserting new item at location.
Notes
-----
This method should be both type and dtype-preserving. If the item
cannot be held in an array of this type/dtype, either ValueError or
TypeError should be raised.
The default implementation relies on _from_sequence to raise on invalid
items.
Examples
--------
>>> arr = pd.array([1, 2, 3])
>>> arr.insert(2, -1)
<IntegerArray>
[1, 2, -1, 3]
Length: 4, dtype: Int64
"""
loc = validate_insert_loc(loc, len(self))
item_arr = type(self)._from_sequence([item], dtype=self.dtype)
return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]])
|
Insert an item at the given position.
Parameters
----------
loc : int
Index where the `item` needs to be inserted.
item : scalar-like
Value to be inserted.
Returns
-------
ExtensionArray
With `item` inserted at `loc`.
See Also
--------
Index.insert: Make new Index inserting new item at location.
Notes
-----
This method should be both type and dtype-preserving. If the item
cannot be held in an array of this type/dtype, either ValueError or
TypeError should be raised.
The default implementation relies on _from_sequence to raise on invalid
items.
Examples
--------
>>> arr = pd.array([1, 2, 3])
>>> arr.insert(2, -1)
<IntegerArray>
[1, 2, -1, 3]
Length: 4, dtype: Int64
|
python
|
pandas/core/arrays/base.py
| 2,446
|
[
"self",
"loc",
"item"
] |
Self
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
segmentFor
|
Segment<K, V, E, S> segmentFor(int hash) {
// TODO(fry): Lazily create segments?
return segments[(hash >>> segmentShift) & segmentMask];
}
|
Returns the segment that should be used for a key with the given hash.
@param hash the hash code for the key
@return the segment
|
java
|
android/guava/src/com/google/common/collect/MapMakerInternalMap.java
| 1,139
|
[
"hash"
] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
freqstr
|
def freqstr(self) -> str:
"""
Return the frequency object as a string if it's set, otherwise None.
See Also
--------
DatetimeIndex.inferred_freq : Returns a string representing a frequency
generated by infer_freq.
Examples
--------
For DatetimeIndex:
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D")
>>> idx.freqstr
'D'
The frequency can be inferred if there are more than 2 points:
>>> idx = pd.DatetimeIndex(
... ["2018-01-01", "2018-01-03", "2018-01-05"], freq="infer"
... )
>>> idx.freqstr
'2D'
For PeriodIndex:
>>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M")
>>> idx.freqstr
'M'
"""
from pandas import PeriodIndex
if self._data.freqstr is not None and isinstance(
self._data, (PeriodArray, PeriodIndex)
):
freq = PeriodDtype(self._data.freq)._freqstr
return freq
else:
return self._data.freqstr # type: ignore[return-value]
|
Return the frequency object as a string if it's set, otherwise None.
See Also
--------
DatetimeIndex.inferred_freq : Returns a string representing a frequency
generated by infer_freq.
Examples
--------
For DatetimeIndex:
>>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00"], freq="D")
>>> idx.freqstr
'D'
The frequency can be inferred if there are more than 2 points:
>>> idx = pd.DatetimeIndex(
... ["2018-01-01", "2018-01-03", "2018-01-05"], freq="infer"
... )
>>> idx.freqstr
'2D'
For PeriodIndex:
>>> idx = pd.PeriodIndex(["2023-1", "2023-2", "2023-3"], freq="M")
>>> idx.freqstr
'M'
|
python
|
pandas/core/indexes/datetimelike.py
| 181
|
[
"self"
] |
str
| true
| 4
| 6.8
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
mro_lookup
|
def mro_lookup(cls, attr, stop=None, monkey_patched=None):
"""Return the first node by MRO order that defines an attribute.
Arguments:
cls (Any): Child class to traverse.
attr (str): Name of attribute to find.
stop (Set[Any]): A set of types that if reached will stop
the search.
monkey_patched (Sequence): Use one of the stop classes
if the attributes module origin isn't in this list.
Used to detect monkey patched attributes.
Returns:
Any: The attribute value, or :const:`None` if not found.
"""
stop = set() if not stop else stop
monkey_patched = [] if not monkey_patched else monkey_patched
for node in cls.mro():
if node in stop:
try:
value = node.__dict__[attr]
module_origin = value.__module__
except (AttributeError, KeyError):
pass
else:
if module_origin not in monkey_patched:
return node
return
if attr in node.__dict__:
return node
|
Return the first node by MRO order that defines an attribute.
Arguments:
cls (Any): Child class to traverse.
attr (str): Name of attribute to find.
stop (Set[Any]): A set of types that if reached will stop
the search.
monkey_patched (Sequence): Use one of the stop classes
if the attributes module origin isn't in this list.
Used to detect monkey patched attributes.
Returns:
Any: The attribute value, or :const:`None` if not found.
|
python
|
celery/utils/objects.py
| 14
|
[
"cls",
"attr",
"stop",
"monkey_patched"
] | false
| 8
| 7.28
|
celery/celery
| 27,741
|
google
| false
|
|
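A minimal, self-contained sketch of the MRO walk described above; the import path is taken from the row's `file_path`, and plain classes stand in for real celery types:

```python
from celery.utils.objects import mro_lookup

class Base:
    timeout = 10

class Child(Base):
    pass

# Child does not define `timeout` itself, so the walk over
# Child.mro() -> [Child, Base, object] returns Base.
assert mro_lookup(Child, "timeout") is Base

# Reaching a stop class ends the search; an int attribute has no
# __module__, so the monkey-patch check fails and None is returned.
assert mro_lookup(Child, "timeout", stop={Base}) is None
```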
ensure_delete_replication_group
|
def ensure_delete_replication_group(
self,
replication_group_id: str,
initial_sleep_time: float | None = None,
exponential_back_off_factor: float | None = None,
max_retries: int | None = None,
) -> dict:
"""
Delete a replication group ensuring it is either deleted or can't be deleted.
:param replication_group_id: ID of replication to delete
:param initial_sleep_time: Initial sleep time in second
If this is not supplied then this is defaulted to class level value
:param exponential_back_off_factor: Multiplication factor for deciding next sleep time
If this is not supplied then this is defaulted to class level value
:param max_retries: Max retries for checking availability of replication group
If this is not supplied then this is defaulted to class level value
:return: Response from ElastiCache delete replication group API
:raises AirflowException: If replication group is not deleted
"""
self.log.info("Deleting replication group with ID %s", replication_group_id)
response, deleted = self.wait_for_deletion(
replication_group_id=replication_group_id,
initial_sleep_time=initial_sleep_time,
exponential_back_off_factor=exponential_back_off_factor,
max_retries=max_retries,
)
if not deleted:
raise AirflowException(f'Replication group could not be deleted. Response "{response}"')
return response
|
Delete a replication group ensuring it is either deleted or can't be deleted.
:param replication_group_id: ID of replication to delete
:param initial_sleep_time: Initial sleep time in second
If this is not supplied then this is defaulted to class level value
:param exponential_back_off_factor: Multiplication factor for deciding next sleep time
If this is not supplied then this is defaulted to class level value
:param max_retries: Max retries for checking availability of replication group
If this is not supplied then this is defaulted to class level value
:return: Response from ElastiCache delete replication group API
:raises AirflowException: If replication group is not deleted
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/elasticache_replication_group.py
| 249
|
[
"self",
"replication_group_id",
"initial_sleep_time",
"exponential_back_off_factor",
"max_retries"
] |
dict
| true
| 2
| 7.6
|
apache/airflow
| 43,597
|
sphinx
| false
|
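A hedged sketch of driving the deletion helper above; the replication group id is a placeholder, and the call requires live AWS credentials:

```python
from airflow.providers.amazon.aws.hooks.elasticache_replication_group import (
    ElastiCacheReplicationGroupHook,
)

hook = ElastiCacheReplicationGroupHook()
# Placeholder id; raises AirflowException if the group cannot be deleted.
response = hook.ensure_delete_replication_group(
    replication_group_id="my-redis-group",
    initial_sleep_time=15.0,
    max_retries=5,
)
print(response)
```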
check_query_status
|
def check_query_status(self, query_execution_id: str, use_cache: bool = False) -> str | None:
"""
Fetch the state of a submitted query.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_execution`
:param query_execution_id: Id of submitted athena query
:return: One of valid query states, or *None* if the response is
malformed.
"""
response = self.get_query_info(query_execution_id=query_execution_id, use_cache=use_cache)
state = None
try:
state = response["QueryExecution"]["Status"]["State"]
except Exception as e:
# The error is being absorbed here and is being handled by the caller.
# The error is being absorbed to implement retries.
self.log.exception(
"Exception while getting query state. Query execution id: %s, Exception: %s",
query_execution_id,
e,
)
return state
|
Fetch the state of a submitted query.
.. seealso::
- :external+boto3:py:meth:`Athena.Client.get_query_execution`
:param query_execution_id: Id of submitted athena query
:return: One of valid query states, or *None* if the response is
malformed.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/athena.py
| 148
|
[
"self",
"query_execution_id",
"use_cache"
] |
str | None
| true
| 1
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
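A hedged polling sketch for the Athena hook above; the execution id is a placeholder from a previously submitted query:

```python
from airflow.providers.amazon.aws.hooks.athena import AthenaHook

hook = AthenaHook(aws_conn_id="aws_default")
state = hook.check_query_status(query_execution_id="abc-123")

if state is None:
    print("Malformed response; the caller may retry.")
elif state in ("SUCCEEDED", "FAILED", "CANCELLED"):
    print(f"Query finished with state {state}")
```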
isWellFormed
|
public static boolean isWellFormed(byte[] bytes, int off, int len) {
int end = off + len;
checkPositionIndexes(off, end, bytes.length);
// Look for the first non-ASCII character.
for (int i = off; i < end; i++) {
if (bytes[i] < 0) {
return isWellFormedSlowPath(bytes, i, end);
}
}
return true;
}
|
Returns whether the given byte array slice is a well-formed UTF-8 byte sequence, as defined by
{@link #isWellFormed(byte[])}. Note that this can be false even when {@code
isWellFormed(bytes)} is true.
@param bytes the input buffer
@param off the offset in the buffer of the first byte to read
@param len the number of bytes to read from the buffer
|
java
|
android/guava/src/com/google/common/base/Utf8.java
| 123
|
[
"bytes",
"off",
"len"
] | true
| 3
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
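A Python analogue of the Guava check above, included because new examples in this document use Python; a byte slice is well-formed UTF-8 exactly when it decodes without error:

```python
def is_well_formed(data: bytes, off: int = 0, length: int | None = None) -> bool:
    """Python stand-in for Guava's Utf8.isWellFormed(byte[], int, int)."""
    end = len(data) if length is None else off + length
    try:
        data[off:end].decode("utf-8")
        return True
    except UnicodeDecodeError:
        return False

encoded = "héllo".encode("utf-8")  # b'h\xc3\xa9llo'
assert is_well_formed(encoded)
# Cutting the two-byte sequence for 'é' in half is not well-formed,
# even though the full buffer is: the property noted in the javadoc.
assert not is_well_formed(encoded, 0, 2)
```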
removePattern
|
public static String removePattern(final CharSequence text, final String regex) {
return replacePattern(text, regex, StringUtils.EMPTY);
}
|
Removes each substring of the source String that matches the given regular expression using the DOTALL option.
This call is a {@code null} safe equivalent to:
<ul>
<li>{@code text.replaceAll("(?s)" + regex, StringUtils.EMPTY)}</li>
<li>{@code Pattern.compile(regex, Pattern.DOTALL).matcher(text).replaceAll(StringUtils.EMPTY)}</li>
</ul>
<p>A {@code null} reference passed to this method is a no-op.</p>
<pre>{@code
StringUtils.removePattern(null, *) = null
StringUtils.removePattern("any", (String) null) = "any"
StringUtils.removePattern("A<__>\n<__>B", "<.*>") = "AB"
StringUtils.removePattern("ABCabc123", "[a-z]") = "ABC123"
}</pre>
@param text
the source string.
@param regex
the regular expression to which this string is to be matched.
@return The resulting {@link String}.
@see #replacePattern(CharSequence, String, String)
@see String#replaceAll(String, String)
@see Pattern#DOTALL
@since 3.18.0
|
java
|
src/main/java/org/apache/commons/lang3/RegExUtils.java
| 344
|
[
"text",
"regex"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
expires
|
boolean expires() {
return expiresAfterWrite() || expiresAfterAccess();
}
|
Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 328
|
[] | true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
initializeCJS
|
function initializeCJS() {
// This need to be done at runtime in case --expose-internals is set.
let modules = Module.builtinModules = BuiltinModule.getAllBuiltinModuleIds();
if (!getOptionValue('--experimental-quic')) {
modules = modules.filter((i) => i !== 'node:quic');
}
Module.builtinModules = ObjectFreeze(modules);
initializeCjsConditions();
if (!getEmbedderOptions().noGlobalSearchPaths) {
Module._initPaths();
}
// TODO(joyeecheung): deprecate this in favor of a proper hook?
Module.runMain =
require('internal/modules/run_main').executeUserEntryPoint;
}
|
Prepare to run CommonJS code.
This function is called during pre-execution, before any user code is run.
@returns {void}
|
javascript
|
lib/internal/modules/cjs/loader.js
| 458
|
[] | false
| 3
| 7.12
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
getType
|
@Override
public @Nullable Class<?> getType(String name, boolean allowFactoryBeanInit) throws NoSuchBeanDefinitionException {
String beanName = transformedBeanName(name);
// Check manually registered singletons.
Object beanInstance = getSingleton(beanName, false);
if (beanInstance != null && beanInstance.getClass() != NullBean.class) {
if (beanInstance instanceof FactoryBean<?> factoryBean && !BeanFactoryUtils.isFactoryDereference(name)) {
return getTypeForFactoryBean(factoryBean);
}
else {
return beanInstance.getClass();
}
}
// No singleton instance found -> check bean definition.
BeanFactory parentBeanFactory = getParentBeanFactory();
if (parentBeanFactory != null && !containsBeanDefinition(beanName)) {
// No bean definition found in this factory -> delegate to parent.
return parentBeanFactory.getType(originalBeanName(name));
}
RootBeanDefinition mbd = getMergedLocalBeanDefinition(beanName);
Class<?> beanClass = predictBeanType(beanName, mbd);
if (beanClass != null) {
// Check bean class whether we're dealing with a FactoryBean.
if (FactoryBean.class.isAssignableFrom(beanClass)) {
if (!BeanFactoryUtils.isFactoryDereference(name)) {
// If it's a FactoryBean, we want to look at what it creates, not at the factory class.
beanClass = getTypeForFactoryBean(beanName, mbd, allowFactoryBeanInit).resolve();
}
}
else if (BeanFactoryUtils.isFactoryDereference(name)) {
return null;
}
}
if (beanClass == null) {
// Check decorated bean definition, if any: We assume it'll be easier
// to determine the decorated bean's type than the proxy's type.
BeanDefinitionHolder dbd = mbd.getDecoratedDefinition();
if (dbd != null && !BeanFactoryUtils.isFactoryDereference(name)) {
RootBeanDefinition tbd = getMergedBeanDefinition(dbd.getBeanName(), dbd.getBeanDefinition(), mbd);
Class<?> targetClass = predictBeanType(dbd.getBeanName(), tbd);
if (targetClass != null && !FactoryBean.class.isAssignableFrom(targetClass)) {
return targetClass;
}
}
}
return beanClass;
}
|
Internal extended variant of {@link #isTypeMatch(String, ResolvableType)}
to check whether the bean with the given name matches the specified type. Allow
additional constraints to be applied to ensure that beans are not created early.
@param name the name of the bean to query
@param typeToMatch the type to match against (as a {@code ResolvableType})
@return {@code true} if the bean type matches, {@code false} if it
doesn't match or cannot be determined yet
@throws NoSuchBeanDefinitionException if there is no bean with the given name
@since 5.2
@see #getBean
@see #getType
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 710
|
[
"name",
"allowFactoryBeanInit"
] | true
| 16
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
schedule_comm_wait
|
def schedule_comm_wait(graph: fx.Graph) -> None:
"""
Delay the execution of wait tensors of allreduce until its first user.
This algorithm considers the intermediate users, like split, getitem,
of the wait node and schedule those intermediate users as well.
This will result in a better overlapping result.
"""
ops = (
torch.ops._c10d_functional.all_reduce_.default,
torch.ops._c10d_functional.all_reduce.default,
torch.ops._c10d_functional.all_reduce_coalesced.default,
torch.ops._c10d_functional.all_reduce_coalesced_.default,
)
comm_blocks = get_all_comm_blocks(graph, ops)
if not comm_blocks:
return
# Find all the end users.
allreduce_users = OrderedSet[fx.Node]()
for allreduce in comm_blocks:
for output in allreduce.outputs:
allreduce_users.update(output.users)
node_indices = {node: i for i, node in enumerate(graph.nodes)}
for allreduce in comm_blocks:
# Find the earliest/first user -- target_node.
assert len(allreduce.outputs) >= 1, (
f"Found a allreduce that has zero outputs/users -- {allreduce}."
)
# Initialize the target node to avoid typing issues.
target_node = next(iter(next(iter(allreduce.outputs)).users))
target_node_index = 2**31
for user in (user for output in allreduce.outputs for user in output.users):
index = node_indices[user]
if index < target_node_index:
target_node = user
target_node_index = index
# Move wait nodes and all the subsequent nodes in the comm_block to
# before the first user -- target_node.
wait_idx = -1
for wait_idx, node in enumerate(allreduce.node_list):
if node == allreduce.wait_nodes[0]:
break
assert wait_idx >= 0
move_block_before(allreduce.node_list[wait_idx:], target_node)
|
Delay the execution of wait tensors of allreduce until its first user.
This algorithm considers the intermediate users, like split, getitem,
of the wait node and schedule those intermediate users as well.
This will result in a better overlapping result.
|
python
|
torch/_inductor/fx_passes/ddp_fusion.py
| 524
|
[
"graph"
] |
None
| true
| 9
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
median
|
def median(self, numeric_only: bool = False, skipna: bool = True) -> NDFrameT:
"""
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None`` and defaults to False.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
.. versionadded:: 3.0.0
Returns
-------
Series or DataFrame
Median of values within each group.
See Also
--------
Series.groupby : Apply a function groupby to a Series.
DataFrame.groupby : Apply a function groupby to each row or column of a
DataFrame.
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
>>> ser
a 7
a 2
a 8
b 4
b 3
b 3
dtype: int64
>>> ser.groupby(level=0).median()
a 7.0
b 3.0
dtype: float64
For DataFrameGroupBy:
>>> data = {"a": [1, 3, 5, 7, 7, 8, 3], "b": [1, 4, 8, 4, 4, 2, 1]}
>>> df = pd.DataFrame(
... data, index=["dog", "dog", "dog", "mouse", "mouse", "mouse", "mouse"]
... )
>>> df
a b
dog 1 1
dog 3 4
dog 5 8
mouse 7 4
mouse 7 4
mouse 8 2
mouse 3 1
>>> df.groupby(level=0).median()
a b
dog 3.0 4.0
mouse 7.0 3.0
For Resampler:
>>> ser = pd.Series(
... [1, 2, 3, 3, 4, 5],
... index=pd.DatetimeIndex(
... [
... "2023-01-01",
... "2023-01-10",
... "2023-01-15",
... "2023-02-01",
... "2023-02-10",
... "2023-02-15",
... ]
... ),
... )
>>> ser.resample("MS").median()
2023-01-01 2.0
2023-02-01 4.0
Freq: MS, dtype: float64
"""
result = self._cython_agg_general(
"median",
alt=lambda x: Series(x, copy=False).median(
numeric_only=numeric_only, skipna=skipna
),
numeric_only=numeric_only,
skipna=skipna,
)
return result.__finalize__(self.obj, method="groupby")
|
Compute median of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None`` and defaults to False.
skipna : bool, default True
Exclude NA/null values. If an entire group is NA, the result will be NA.
.. versionadded:: 3.0.0
Returns
-------
Series or DataFrame
Median of values within each group.
See Also
--------
Series.groupby : Apply a function groupby to a Series.
DataFrame.groupby : Apply a function groupby to each row or column of a
DataFrame.
Examples
--------
For SeriesGroupBy:
>>> lst = ["a", "a", "a", "b", "b", "b"]
>>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst)
>>> ser
a 7
a 2
a 8
b 4
b 3
b 3
dtype: int64
>>> ser.groupby(level=0).median()
a 7.0
b 3.0
dtype: float64
For DataFrameGroupBy:
>>> data = {"a": [1, 3, 5, 7, 7, 8, 3], "b": [1, 4, 8, 4, 4, 2, 1]}
>>> df = pd.DataFrame(
... data, index=["dog", "dog", "dog", "mouse", "mouse", "mouse", "mouse"]
... )
>>> df
a b
dog 1 1
dog 3 4
dog 5 8
mouse 7 4
mouse 7 4
mouse 8 2
mouse 3 1
>>> df.groupby(level=0).median()
a b
dog 3.0 4.0
mouse 7.0 3.0
For Resampler:
>>> ser = pd.Series(
... [1, 2, 3, 3, 4, 5],
... index=pd.DatetimeIndex(
... [
... "2023-01-01",
... "2023-01-10",
... "2023-01-15",
... "2023-02-01",
... "2023-02-10",
... "2023-02-15",
... ]
... ),
... )
>>> ser.resample("MS").median()
2023-01-01 2.0
2023-02-01 4.0
Freq: MS, dtype: float64
|
python
|
pandas/core/groupby/groupby.py
| 2,304
|
[
"self",
"numeric_only",
"skipna"
] |
NDFrameT
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
customized
|
private static <E> Consumer<Members<E>> customized(Consumer<Members<E>> members,
@Nullable StructuredLoggingJsonMembersCustomizer<?> customizer) {
return (customizer != null) ? members.andThen(customizeWith(customizer)) : members;
}
|
Create a new {@link JsonWriterStructuredLogFormatter} instance with the given
members.
@param members a consumer, which should configure the members
@param customizer an optional customizer to apply
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/JsonWriterStructuredLogFormatter.java
| 51
|
[
"members",
"customizer"
] | true
| 2
| 6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
clientInstanceId
|
@Override
public Uuid clientInstanceId(Duration timeout) {
if (timeout.isNegative()) {
throw new IllegalArgumentException("The timeout cannot be negative.");
}
if (clientTelemetryReporter.isEmpty()) {
throw new IllegalStateException("Telemetry is not enabled. Set config `" + AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG + "` to `true`.");
}
if (clientInstanceId != null) {
return clientInstanceId;
}
clientInstanceId = ClientTelemetryUtils.fetchClientInstanceId(clientTelemetryReporter.get(), timeout);
return clientInstanceId;
}
|
Forcefully terminates an ongoing transaction for a given transactional ID.
<p>
This API is intended for well-formed but long-running transactions that are known to the
transaction coordinator. It is primarily designed for supporting 2PC (two-phase commit) workflows,
where a coordinator may need to unilaterally terminate a participant transaction that hasn't completed.
</p>
@param transactionalId The transactional ID whose active transaction should be forcefully terminated.
@return a {@link TerminateTransactionResult} that can be used to await the operation result.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java
| 5,055
|
[
"timeout"
] |
Uuid
| true
| 4
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
createInnerClass
|
private static GeneratedClass createInnerClass(GeneratedClass generatedClass, String name, ClassName target) {
return generatedClass.getOrAdd(name, type -> {
type.addJavadoc("Bean definitions for {@link $T}.", target);
type.addModifiers(Modifier.PUBLIC, Modifier.STATIC);
});
}
|
Return the {@link GeneratedClass} to use for the specified {@code target}.
<p>If the target class is an inner class, a corresponding inner class in
the original structure is created.
@param generationContext the generation context to use
@param target the chosen target class name for the bean definition
@return the generated class to use
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanDefinitionMethodGenerator.java
| 140
|
[
"generatedClass",
"name",
"target"
] |
GeneratedClass
| true
| 1
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
subscribeInternal
|
private void subscribeInternal(Collection<String> topics, Optional<ConsumerRebalanceListener> listener) {
acquireAndEnsureOpen();
try {
throwIfGroupIdNotDefined();
if (topics == null)
throw new IllegalArgumentException("Topic collection to subscribe to cannot be null");
if (topics.isEmpty()) {
// treat subscribing to empty topic list as the same as unsubscribing
this.unsubscribe();
} else {
for (String topic : topics) {
if (isBlank(topic))
throw new IllegalArgumentException("Topic collection to subscribe to cannot contain null or empty topic");
}
throwIfNoAssignorsConfigured();
// Clear the buffered data which are not a part of newly assigned topics
final Set<TopicPartition> currentTopicPartitions = new HashSet<>();
for (TopicPartition tp : subscriptions.assignedPartitions()) {
if (topics.contains(tp.topic()))
currentTopicPartitions.add(tp);
}
fetcher.clearBufferedDataForUnassignedPartitions(currentTopicPartitions);
log.info("Subscribed to topic(s): {}", String.join(", ", topics));
if (this.subscriptions.subscribe(new HashSet<>(topics), listener))
metadata.requestUpdateForNewTopics();
}
} finally {
release();
}
}
|
Internal helper method for {@link #subscribe(Collection)} and
{@link #subscribe(Collection, ConsumerRebalanceListener)}
<p>
Subscribe to the given list of topics to get dynamically assigned partitions.
<b>Topic subscriptions are not incremental. This list will replace the current
assignment (if there is one).</b> It is not possible to combine topic subscription with group management
with manual partition assignment through {@link #assign(Collection)}.
If the given list of topics is empty, it is treated the same as {@link #unsubscribe()}.
<p>
@param topics The list of topics to subscribe to
@param listener {@link Optional} listener instance to get notifications on partition assignment/revocation
for the subscribed topics
@throws IllegalArgumentException If topics is null or contains null or empty elements
@throws IllegalStateException If {@code subscribe()} is called previously with pattern, or assign is called
previously (without a subsequent call to {@link #unsubscribe()}), or if not
configured at-least one partition assignment strategy
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java
| 476
|
[
"topics",
"listener"
] |
void
| true
| 6
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
getFunctionNames
|
function getFunctionNames(functionDeclaration: ValidFunctionDeclaration): Node[] {
switch (functionDeclaration.kind) {
case SyntaxKind.FunctionDeclaration:
if (functionDeclaration.name) return [functionDeclaration.name];
// If the function declaration doesn't have a name, it should have a default modifier.
// We validated this in `isValidFunctionDeclaration` through `hasNameOrDefault`
const defaultModifier = Debug.checkDefined(
findModifier(functionDeclaration, SyntaxKind.DefaultKeyword),
"Nameless function declaration should be a default export",
);
return [defaultModifier];
case SyntaxKind.MethodDeclaration:
return [functionDeclaration.name];
case SyntaxKind.Constructor:
const ctrKeyword = Debug.checkDefined(
findChildOfKind(functionDeclaration, SyntaxKind.ConstructorKeyword, functionDeclaration.getSourceFile()),
"Constructor declaration should have constructor keyword",
);
if (functionDeclaration.parent.kind === SyntaxKind.ClassExpression) {
const variableDeclaration = functionDeclaration.parent.parent;
return [variableDeclaration.name, ctrKeyword];
}
return [ctrKeyword];
case SyntaxKind.ArrowFunction:
return [functionDeclaration.parent.name];
case SyntaxKind.FunctionExpression:
if (functionDeclaration.name) return [functionDeclaration.name, functionDeclaration.parent.name];
return [functionDeclaration.parent.name];
default:
return Debug.assertNever(functionDeclaration, `Unexpected function declaration kind ${(functionDeclaration as ValidFunctionDeclaration).kind}`);
}
}
|
Gets the symbol for the contextual type of the node if it is not a union or intersection.
|
typescript
|
src/services/refactors/convertParamsToDestructuredObject.ts
| 704
|
[
"functionDeclaration"
] | true
| 4
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
return MoreObjects.toStringHelper(this).add("source", source).add("event", event).toString();
}
|
Returns the wrapped, 'dead' event, which the system was unable to deliver to any registered
subscriber.
@return the 'dead' event that could not be delivered.
|
java
|
android/guava/src/com/google/common/eventbus/DeadEvent.java
| 66
|
[] |
String
| true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
canAcquire
|
private boolean canAcquire(long nowMicros, long timeoutMicros) {
return queryEarliestAvailable(nowMicros) - timeoutMicros <= nowMicros;
}
|
Acquires the given number of permits from this {@code RateLimiter} if it can be obtained
without exceeding the specified {@code timeout}, or returns {@code false} immediately (without
waiting) if the permits would not have been granted before the timeout expired.
@param permits the number of permits to acquire
@param timeout the maximum time to wait for the permits. Negative values are treated as zero.
@param unit the time unit of the timeout argument
@return {@code true} if the permits were acquired, {@code false} otherwise
@throws IllegalArgumentException if the requested number of permits is negative or zero
|
java
|
android/guava/src/com/google/common/util/concurrent/RateLimiter.java
| 428
|
[
"nowMicros",
"timeoutMicros"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
getRelatedCauses
|
public Throwable @Nullable [] getRelatedCauses() {
if (this.relatedCauses == null) {
return null;
}
return this.relatedCauses.toArray(new Throwable[0]);
}
|
Return the related causes, if any.
@return the array of related causes, or {@code null} if none
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/BeanCreationException.java
| 149
|
[] | true
| 2
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
invokeExactStaticMethod
|
public static Object invokeExactStaticMethod(final Class<?> cls, final String methodName, final Object... args)
throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
final Object[] actuals = ArrayUtils.nullToEmpty(args);
return invokeExactStaticMethod(cls, methodName, actuals, ClassUtils.toClass(actuals));
}
|
Invokes a {@code static} method whose parameter types match exactly the object types.
<p>
This uses reflection to invoke the method obtained from a call to {@link #getAccessibleMethod(Class, String, Class[])}.
</p>
@param cls invoke static method on this class.
@param methodName get method with this name.
@param args use these arguments - treat {@code null} as empty array.
@return The value returned by the invoked method.
@throws NoSuchMethodException Thrown if there is no such accessible method.
@throws IllegalAccessException Thrown if this found {@code Method} is enforcing Java language access control and the underlying method is
inaccessible.
@throws IllegalArgumentException Thrown if:
<ul>
<li>the found {@code Method} is an instance method and the specified {@code object} argument is not an instance of
the class or interface declaring the underlying method (or of a subclass or interface implementor);</li>
<li>the number of actual and formal parameters differ;</li>
<li>an unwrapping conversion for primitive arguments fails; or</li>
<li>after possible unwrapping, a parameter value can't be converted to the corresponding formal parameter type by a
method invocation conversion.</li>
</ul>
@throws InvocationTargetException Thrown if the underlying method throws an exception.
@throws ExceptionInInitializerError Thrown if the initialization provoked by this method fails.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
| 660
|
[
"cls",
"methodName"
] |
Object
| true
| 1
| 6.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
processImports
|
private void processImports(ConfigurationClass configClass, SourceClass currentSourceClass,
Collection<SourceClass> importCandidates, Predicate<String> filter, boolean checkForCircularImports) {
if (importCandidates.isEmpty()) {
return;
}
if (checkForCircularImports && isChainedImportOnStack(configClass)) {
this.problemReporter.error(new CircularImportProblem(configClass, this.importStack));
}
else {
this.importStack.push(configClass);
try {
for (SourceClass candidate : importCandidates) {
if (candidate.isAssignable(ImportSelector.class)) {
// Candidate class is an ImportSelector -> delegate to it to determine imports
Class<?> candidateClass = candidate.loadClass();
ImportSelector selector = ParserStrategyUtils.instantiateClass(candidateClass, ImportSelector.class,
this.environment, this.resourceLoader, this.registry);
Predicate<String> selectorFilter = selector.getExclusionFilter();
if (selectorFilter != null) {
filter = filter.or(selectorFilter);
}
if (selector instanceof DeferredImportSelector deferredImportSelector) {
this.deferredImportSelectorHandler.handle(configClass, deferredImportSelector);
}
else {
String[] importClassNames = selector.selectImports(currentSourceClass.getMetadata());
Collection<SourceClass> importSourceClasses = asSourceClasses(importClassNames, filter);
processImports(configClass, currentSourceClass, importSourceClasses, filter, false);
}
}
else if (candidate.isAssignable(BeanRegistrar.class)) {
Class<?> candidateClass = candidate.loadClass();
BeanRegistrar registrar = (BeanRegistrar) BeanUtils.instantiateClass(candidateClass);
AnnotationMetadata metadata = currentSourceClass.getMetadata();
if (registrar instanceof ImportAware importAware) {
importAware.setImportMetadata(metadata);
}
configClass.addBeanRegistrar(metadata.getClassName(), registrar);
}
else if (candidate.isAssignable(ImportBeanDefinitionRegistrar.class)) {
// Candidate class is an ImportBeanDefinitionRegistrar ->
// delegate to it to register additional bean definitions
Class<?> candidateClass = candidate.loadClass();
ImportBeanDefinitionRegistrar registrar =
ParserStrategyUtils.instantiateClass(candidateClass, ImportBeanDefinitionRegistrar.class,
this.environment, this.resourceLoader, this.registry);
configClass.addImportBeanDefinitionRegistrar(registrar, currentSourceClass.getMetadata());
}
else {
// Candidate class not an ImportSelector or ImportBeanDefinitionRegistrar ->
// process it as an @Configuration class
this.importStack.registerImport(
currentSourceClass.getMetadata(), candidate.getMetadata().getClassName());
processConfigurationClass(candidate.asConfigClass(configClass), filter);
}
}
}
catch (BeanDefinitionStoreException ex) {
throw ex;
}
catch (Throwable ex) {
throw new BeanDefinitionStoreException(
"Failed to process import candidates for configuration class [" +
configClass.getMetadata().getClassName() + "]: " + ex.getMessage(), ex);
}
finally {
this.importStack.pop();
}
}
}
|
Recursively collect all declared {@code @Import} values. Unlike most
meta-annotations it is valid to have several {@code @Import}s declared with
different values; the usual process of returning values from the first
meta-annotation on a class is not sufficient.
<p>For example, it is common for a {@code @Configuration} class to declare direct
{@code @Import}s in addition to meta-imports originating from an {@code @Enable}
annotation.
<p>As of Spring Framework 7.0, {@code @Import} annotations declared on interfaces
implemented by the configuration class are also considered. This allows imports to
be triggered indirectly via marker interfaces or shared base interfaces.
@param sourceClass the class to search
@param imports the imports collected so far
@param visited used to track visited classes and interfaces to prevent infinite
recursion
@throws IOException if there is any problem reading metadata from the named class
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassParser.java
| 579
|
[
"configClass",
"currentSourceClass",
"importCandidates",
"filter",
"checkForCircularImports"
] |
void
| true
| 12
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
becomeSubsumedInto
|
private void becomeSubsumedInto(CloseableList otherCloseables) {
checkAndUpdateState(OPEN, SUBSUMED);
otherCloseables.add(closeables, directExecutor());
}
|
Attempts to cancel execution of this step. This attempt will fail if the step has already
completed, has already been cancelled, or could not be cancelled for some other reason. If
successful, and this step has not started when {@code cancel} is called, this step should never
run.
<p>If successful, causes the objects captured by this step (if already started) and its input
step(s) for later closing to be closed on their respective {@link Executor}s. If any such calls
specified {@link MoreExecutors#directExecutor()}, those objects will be closed synchronously.
@param mayInterruptIfRunning {@code true} if the thread executing this task should be
interrupted; otherwise, in-progress tasks are allowed to complete, but the step will be
cancelled regardless
@return {@code false} if the step could not be cancelled, typically because it has already
completed normally; {@code true} otherwise
|
java
|
android/guava/src/com/google/common/util/concurrent/ClosingFuture.java
| 1,108
|
[
"otherCloseables"
] |
void
| true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
make_ktc_generator
|
def make_ktc_generator(
template: Union[KernelTemplate, ExternKernelChoice],
cs: Generator[KernelTemplateParams, None, None],
extra_kwargs: dict[str, Any],
overrides: dict[str, Any],
layout: Layout,
inputs: KernelInputs,
) -> Generator[KernelTemplateChoice, None, None]:
"""
Create a generator of KernelTemplateChoice objects for a given template.
Args:
template: The template object (KernelTemplate or ExternKernelChoice)
cs: Generator of KernelTemplateParams from template heuristic
overrides: Override kwargs for the template
layout: Layout value for the template
inputs: KernelInputs for the op
Yields:
KernelTemplateChoice objects
"""
for params in cs:
# Apply overrides to params
base_kwargs = params.to_kwargs()
final_kwargs = {**base_kwargs, **overrides}
final_params = DictKernelTemplateParams(final_kwargs)
yield KernelTemplateChoice(
template=template,
params=final_params,
extra_kwargs=extra_kwargs,
layout=layout,
inputs=inputs,
)
|
Create a generator of KernelTemplateChoice objects for a given template.
Args:
template: The template object (KernelTemplate or ExternKernelChoice)
cs: Generator of KernelTemplateParams from template heuristic
overrides: Override kwargs for the template
layout: Layout value for the template
inputs: KernelInputs for the op
Yields:
KernelTemplateChoice objects
|
python
|
torch/_inductor/kernel_template_choice.py
| 67
|
[
"template",
"cs",
"extra_kwargs",
"overrides",
"layout",
"inputs"
] |
Generator[KernelTemplateChoice, None, None]
| true
| 2
| 6.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
is_multi_agg_with_relabel
|
def is_multi_agg_with_relabel(**kwargs) -> bool:
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> is_multi_agg_with_relabel(a="max")
False
>>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
True
>>> is_multi_agg_with_relabel()
False
"""
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
len(kwargs) > 0
)
|
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> is_multi_agg_with_relabel(a="max")
False
>>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
True
>>> is_multi_agg_with_relabel()
False
|
python
|
pandas/core/apply.py
| 1,799
|
[] |
bool
| true
| 3
| 7.84
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
documentation
|
public abstract String documentation();
|
Documentation of the Type.
@return details about valid values, representation
|
java
|
clients/src/main/java/org/apache/kafka/common/protocol/types/Type.java
| 112
|
[] |
String
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
reverse
|
public static void reverse(final byte[] array) {
if (array != null) {
reverse(array, 0, array.length);
}
}
|
Reverses the order of the given array.
<p>
This method does nothing for a {@code null} input array.
</p>
@param array the array to reverse, may be {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 6,388
|
[
"array"
] |
void
| true
| 2
| 7.04
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
is_scalar_nan
|
def is_scalar_nan(x):
"""Test if x is NaN.
This function is meant to overcome the issue that np.isnan does not allow
non-numerical types as input, and that np.nan is not float('nan').
Parameters
----------
x : any type
Any scalar value.
Returns
-------
bool
Returns true if x is NaN, and false otherwise.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils._missing import is_scalar_nan
>>> is_scalar_nan(np.nan)
True
>>> is_scalar_nan(float("nan"))
True
>>> is_scalar_nan(None)
False
>>> is_scalar_nan("")
False
>>> is_scalar_nan([np.nan])
False
"""
return (
not isinstance(x, numbers.Integral)
and isinstance(x, numbers.Real)
and math.isnan(x)
)
|
Test if x is NaN.
This function is meant to overcome the issue that np.isnan does not allow
non-numerical types as input, and that np.nan is not float('nan').
Parameters
----------
x : any type
Any scalar value.
Returns
-------
bool
Returns true if x is NaN, and false otherwise.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils._missing import is_scalar_nan
>>> is_scalar_nan(np.nan)
True
>>> is_scalar_nan(float("nan"))
True
>>> is_scalar_nan(None)
False
>>> is_scalar_nan("")
False
>>> is_scalar_nan([np.nan])
False
|
python
|
sklearn/utils/_missing.py
| 9
|
[
"x"
] | false
| 3
| 7.2
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_get_dependency_info
|
def _get_dependency_info() -> dict[str, JSONSerializable]:
"""
Returns dependency information as a JSON serializable dictionary.
"""
deps = [
"pandas",
# required
"numpy",
"dateutil",
# install / build,
"pip",
"Cython",
# docs
"sphinx",
# Other, not imported.
"IPython",
]
# Optional dependencies
deps.extend(list(VERSIONS))
result: dict[str, JSONSerializable] = {}
for modname in deps:
try:
mod = import_optional_dependency(modname, errors="ignore")
except Exception:
# Dependency conflicts may cause a non ImportError
result[modname] = "N/A"
else:
result[modname] = get_version(mod) if mod else None
return result
|
Returns dependency information as a JSON serializable dictionary.
|
python
|
pandas/util/_print_versions.py
| 63
|
[] |
dict[str, JSONSerializable]
| true
| 4
| 6.4
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
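The private helper above backs pandas' public version report; a short sketch of both entry points (the direct import mirrors the row's `file_path` and is an internal API that may change):

```python
import pandas as pd

# Public wrapper that prints the same dependency information.
pd.show_versions()

# Direct call: returns a JSON-serializable dict, with "N/A" for modules
# whose import failed for reasons other than ImportError.
from pandas.util._print_versions import _get_dependency_info

deps = _get_dependency_info()
print(deps["numpy"])
```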
predict
|
def predict(self, X):
"""Predict the first class seen in `classes_`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
preds : ndarray of shape (n_samples,)
Predictions of the first class seen in `classes_`.
"""
if self.methods_to_check == "all" or "predict" in self.methods_to_check:
X, y = self._check_X_y(X)
rng = check_random_state(self.random_state)
return rng.choice(self.classes_, size=_num_samples(X))
|
Predict the first class seen in `classes_`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data.
Returns
-------
preds : ndarray of shape (n_samples,)
Predictions of the first class seen in `classes_`.
|
python
|
sklearn/utils/_mocking.py
| 242
|
[
"self",
"X"
] | false
| 3
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_reduce
|
def _reduce(
self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
"""
result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs)
if isinstance(result, pa.Array):
return self._from_pyarrow_array(result)
else:
return result
|
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
|
python
|
pandas/core/arrays/arrow/array.py
| 2,105
|
[
"self",
"name",
"skipna",
"keepdims"
] | true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
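A hedged sketch of the reduction interface above. `_reduce` is private, so the public reduction that routes through it is shown alongside a direct call; the `float64[pyarrow]` dtype selects the Arrow-backed array from this row and requires `pyarrow` to be installed:

```python
import pandas as pd

arr = pd.array([1.0, 2.0, None], dtype="float64[pyarrow]")

# Public reductions dispatch through _reduce under the hood.
print(arr.mean())  # 1.5

# Direct call to the private interface documented above.
print(arr._reduce("mean", skipna=True))  # 1.5
```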
create_activation_checkpointing_logging_structure_payload
|
def create_activation_checkpointing_logging_structure_payload(
joint_graph: Graph,
joint_graph_node_information: dict[str, Any],
joint_graph_edges: list[tuple[str, str]],
all_recomputable_banned_nodes: list[Node],
expected_runtime: float,
saved_node_idxs: list[int],
recomputable_node_idxs: list[int],
memories_banned_nodes: list[int],
normalized_memories_banned_nodes: list[float],
runtimes_banned_nodes: list[float],
min_cut_saved_values: list[Node],
) -> dict[str, Any]:
"""
Creates a structured payload for logging activation checkpointing information.
Args:
joint_graph: The computational graph representing operations.
joint_graph_node_information: Dictionary containing information about nodes in the joint graph.
joint_graph_edges: List of edges in the joint graph represented as tuples of node names.
all_recomputable_banned_nodes: List of nodes that are banned from recomputation.
expected_runtime: Expected runtime of the computation.
saved_node_idxs: Indices of nodes that are saved (not recomputed).
recomputable_node_idxs: Indices of nodes that can be recomputed.
memories_banned_nodes: Memory usage values (in absolute units) for banned nodes.
normalized_memories_banned_nodes: Normalized memory usage values for banned nodes,
used as input to the knapsack algorithm.
runtimes_banned_nodes: Runtime values for banned nodes, used as input to the
knapsack algorithm.
min_cut_saved_values: List of nodes saved by the min-cut algorithm.
Returns:
A dictionary containing structured logging information for activation checkpointing.
"""
activation_checkpointing_logging_structure_payload: dict[str, Any] = {
"Joint Graph Size": len(joint_graph.nodes),
"Joint Graph Edges": {
"Total": len(joint_graph_edges),
"Edges": joint_graph_edges,
},
"Joint Graph Node Information": joint_graph_node_information,
"Recomputable Banned Nodes Order": [
node.name for node in all_recomputable_banned_nodes
],
"Expected Runtime": expected_runtime,
"Knapsack Saved Nodes": saved_node_idxs,
"Knapsack Recomputed Nodes": recomputable_node_idxs,
"Absolute Memories": memories_banned_nodes,
"Knapsack Input Memories": normalized_memories_banned_nodes,
"Knapsack Input Runtimes": runtimes_banned_nodes,
"Min Cut Solution Saved Values": [node.name for node in min_cut_saved_values],
}
return activation_checkpointing_logging_structure_payload
|
Creates a structured payload for logging activation checkpointing information.
Args:
joint_graph: The computational graph representing operations.
joint_graph_node_information: Dictionary containing information about nodes in the joint graph.
joint_graph_edges: List of edges in the joint graph represented as tuples of node names.
all_recomputable_banned_nodes: List of nodes that are banned from recomputation.
expected_runtime: Expected runtime of the computation.
saved_node_idxs: Indices of nodes that are saved (not recomputed).
recomputable_node_idxs: Indices of nodes that can be recomputed.
memories_banned_nodes: Memory usage values (in absolute units) for banned nodes.
normalized_memories_banned_nodes: Normalized memory usage values for banned nodes,
used as input to the knapsack algorithm.
runtimes_banned_nodes: Runtime values for banned nodes, used as input to the
knapsack algorithm.
min_cut_saved_values: List of nodes saved by the min-cut algorithm.
Returns:
A dictionary containing structured logging information for activation checkpointing.
|
python
|
torch/_functorch/_activation_checkpointing/ac_logging_utils.py
| 55
|
[
"joint_graph",
"joint_graph_node_information",
"joint_graph_edges",
"all_recomputable_banned_nodes",
"expected_runtime",
"saved_node_idxs",
"recomputable_node_idxs",
"memories_banned_nodes",
"normalized_memories_banned_nodes",
"runtimes_banned_nodes",
"min_cut_saved_values"
] |
dict[str, Any]
| true
| 1
| 6.16
|
pytorch/pytorch
| 96,034
|
google
| false
|
setProducerState
|
public void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional) {
if (isClosed()) {
// Sequence numbers are assigned when the batch is closed while the accumulator is being drained.
// If the resulting ProduceRequest to the partition leader failed for a retriable error, the batch will
// be re-queued. In this case, we should not attempt to set the state again, since changing the producerId and sequence
// once a batch has been sent to the broker risks introducing duplicates.
throw new IllegalStateException("Trying to set producer state of an already closed batch. This indicates a bug on the client.");
}
this.producerId = producerId;
this.producerEpoch = producerEpoch;
this.baseSequence = baseSequence;
this.isTransactional = isTransactional;
}
|
Sets the producer state (producer id, producer epoch, base sequence and transactional flag) for this batch.
Throws IllegalStateException if the batch has already been closed, since re-assigning producer state after
the batch may have been sent to the broker risks introducing duplicates.
|
java
|
clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java
| 308
|
[
"producerId",
"producerEpoch",
"baseSequence",
"isTransactional"
] |
void
| true
| 2
| 6
|
apache/kafka
| 31,560
|
javadoc
| false
|
load
|
static CliToolProvider load(Map<String, String> sysprops, String toolname, String libs) {
Path homeDir = Paths.get(sysprops.get("es.path.home")).toAbsolutePath();
final ClassLoader cliLoader;
if (libs.isBlank()) {
cliLoader = ClassLoader.getSystemClassLoader();
} else {
List<Path> libsToLoad = Stream.of(libs.split(",")).map(homeDir::resolve).toList();
cliLoader = loadJars(libsToLoad);
}
ServiceLoader<CliToolProvider> toolFinder = ServiceLoader.load(CliToolProvider.class, cliLoader);
List<CliToolProvider> tools = StreamSupport.stream(toolFinder.spliterator(), false).filter(p -> p.name().equals(toolname)).toList();
if (tools.size() > 1) {
String names = tools.stream().map(t -> t.getClass().getName()).collect(Collectors.joining(", "));
throw new AssertionError("Multiple ToolProviders found with name [" + toolname + "]: " + names);
}
if (tools.size() == 0) {
var names = StreamSupport.stream(toolFinder.spliterator(), false).map(CliToolProvider::name).toList();
throw new AssertionError("CliToolProvider [" + toolname + "] not found, available names are " + names);
}
return tools.get(0);
}
|
Loads a tool provider from the Elasticsearch distribution.
@param sysprops the system properties of the CLI process
@param toolname the name of the tool to load
@param libs the library directories to load, relative to the Elasticsearch homedir
@return the instance of the loaded tool
@throws AssertionError if the given toolname cannot be found or there are more than one tools found with the same name
|
java
|
libs/cli/src/main/java/org/elasticsearch/cli/CliToolProvider.java
| 53
|
[
"sysprops",
"toolname",
"libs"
] |
CliToolProvider
| true
| 4
| 7.76
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
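A minimal Python sketch of load()'s uniqueness rules above, assuming provider objects expose a name attribute (illustrative only, not the Elasticsearch API):

def load_tool(providers, toolname):
    # Keep only providers whose name matches, then enforce exactly-one semantics.
    tools = [p for p in providers if p.name == toolname]
    if len(tools) > 1:
        names = ", ".join(type(p).__name__ for p in tools)
        raise AssertionError(f"Multiple ToolProviders found with name [{toolname}]: {names}")
    if not tools:
        available = [p.name for p in providers]
        raise AssertionError(f"CliToolProvider [{toolname}] not found, available names are {available}")
    return tools[0]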
releaseAll
|
private void releaseAll() {
IOException exceptionChain = null;
exceptionChain = releaseInflators(exceptionChain);
exceptionChain = releaseInputStreams(exceptionChain);
exceptionChain = releaseZipContent(exceptionChain);
exceptionChain = releaseZipContentForManifest(exceptionChain);
if (exceptionChain != null) {
throw new UncheckedIOException(exceptionChain);
}
}
|
Called by the {@link Cleaner} to free resources.
@see java.lang.Runnable#run()
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/jar/NestedJarFileResources.java
| 156
|
[] |
void
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
equals
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
Options other = (Options) obj;
return this.options.equals(other.options);
}
|
Indicates whether the given object is an {@code Options} instance containing the same options.
@param obj the object to compare against
@return {@code true} if the given object is equal to this one
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigData.java
| 201
|
[
"obj"
] | true
| 4
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
of
|
static PemSslStore of(@Nullable String type, @Nullable String alias, @Nullable String password,
List<X509Certificate> certificates, @Nullable PrivateKey privateKey) {
Assert.notEmpty(certificates, "'certificates' must not be empty");
return new PemSslStore() {
@Override
public @Nullable String type() {
return type;
}
@Override
public @Nullable String alias() {
return alias;
}
@Override
public @Nullable String password() {
return password;
}
@Override
public List<X509Certificate> certificates() {
return certificates;
}
@Override
public @Nullable PrivateKey privateKey() {
return privateKey;
}
};
}
|
Factory method that can be used to create a new {@link PemSslStore} with the given
values.
@param type the key store type
@param alias the alias used when setting entries in the {@link KeyStore}
@param password the password used when
{@link KeyStore#setKeyEntry(String, java.security.Key, char[], java.security.cert.Certificate[])
setting key entries} in the {@link KeyStore}
@param certificates the certificates for this store
@param privateKey the private key
@return a new {@link PemSslStore} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStore.java
| 157
|
[
"type",
"alias",
"password",
"certificates",
"privateKey"
] |
PemSslStore
| true
| 1
| 6.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getSourceDirFromTypeScriptConfig
|
function getSourceDirFromTypeScriptConfig(): string | undefined {
const tsconfig = getTsconfig()
if (!tsconfig) {
return undefined
}
const { config } = tsconfig
return config.compilerOptions?.rootDir ?? config.compilerOptions?.baseUrl ?? config.compilerOptions?.rootDirs?.[0]
}
|
Determines the absolute path to the source directory.
|
typescript
|
packages/cli/src/utils/client-output-path.ts
| 45
|
[] | true
| 2
| 6.88
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
onHeartbeatFailure
|
public void onHeartbeatFailure(boolean retriable) {
if (!retriable) {
metricsManager.maybeRecordRebalanceFailed();
}
// The leave group request is sent out once (not retried), so we should complete the leave
// operation once the request completes, regardless of the response.
if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
log.warn("Member {} with epoch {} received a failed response to the heartbeat to " +
"leave the group and completed the leave operation. ", memberId, memberEpoch);
}
}
|
Notify the member that an error heartbeat response was received.
@param retriable True if the request failed with a retriable error.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 301
|
[
"retriable"
] |
void
| true
| 4
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
readShort
|
@CanIgnoreReturnValue // to skip some bytes
@Override
public short readShort() throws IOException {
return (short) readUnsignedShort();
}
|
Reads a {@code short} as specified by {@link DataInputStream#readShort()}, except using
little-endian byte order.
@return the next two bytes of the input stream, interpreted as a {@code short} in little-endian
byte order.
@throws IOException if an I/O error occurs.
|
java
|
android/guava/src/com/google/common/io/LittleEndianDataInputStream.java
| 190
|
[] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
loadAll
|
public Map<K, V> loadAll(Iterable<? extends K> keys) throws Exception {
// This will be caught by getAll(), causing it to fall back to multiple calls to
// LoadingCache.get
throw new UnsupportedLoadingOperationException();
}
|
Computes or retrieves the values corresponding to {@code keys}. This method is called by {@link
LoadingCache#getAll}.
<p>If the returned map doesn't contain all requested {@code keys} then the entries it does
contain will be cached, but {@code getAll} will throw an exception. If the returned map
contains extra keys not present in {@code keys} then all returned entries will be cached, but
only the entries for {@code keys} will be returned from {@code getAll}.
<p>This method should be overridden when bulk retrieval is significantly more efficient than
many individual lookups. Note that {@link LoadingCache#getAll} will defer to individual calls
to {@link LoadingCache#get} if this method is not overridden.
@param keys the unique, non-null keys whose values should be loaded
@return a map from each key in {@code keys} to the value associated with that key; <b>may not
contain null values</b>
@throws Exception if unable to load the result
@throws InterruptedException if this method is interrupted. {@code InterruptedException} is
treated like any other {@code Exception} in all respects except that, when it is caught,
the thread's interrupt status is set
@since 11.0
|
java
|
android/guava/src/com/google/common/cache/CacheLoader.java
| 125
|
[
"keys"
] | true
| 1
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
getPermissionModelFlagsToCopy
|
function getPermissionModelFlagsToCopy() {
if (permissionModelFlagsToCopy === undefined) {
permissionModelFlagsToCopy = [...permission.availableFlags(), '--permission'];
}
return permissionModelFlagsToCopy;
}
|
Returns the permission-model CLI flags that should be copied to spawned child
processes, computing and caching the list on first use.
@returns {string[]}
|
javascript
|
lib/child_process.js
| 543
|
[] | false
| 2
| 6.8
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
owners
|
public List<KafkaPrincipal> owners() {
return owners;
}
|
Returns the owners for which delegation tokens are to be described. If owners is null, all the
user-owned tokens and tokens where the user has Describe permission will be returned.
@return the owners to describe delegation tokens for, or null
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/DescribeDelegationTokenOptions.java
| 41
|
[] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
copyInto
|
@CanIgnoreReturnValue
public final <C extends Collection<? super E>> C copyInto(C collection) {
checkNotNull(collection);
Iterable<E> iterable = getDelegate();
if (iterable instanceof Collection) {
collection.addAll((Collection<E>) iterable);
} else {
for (E item : iterable) {
collection.add(item);
}
}
return collection;
}
|
Copies all the elements from this fluent iterable to {@code collection}. This is equivalent to
calling {@code Iterables.addAll(collection, this)}.
<p><b>{@code Stream} equivalent:</b> {@code stream.forEachOrdered(collection::add)} or {@code
stream.forEach(collection::add)}.
@param collection the collection to copy elements to
@return {@code collection}, for convenience
@since 14.0
|
java
|
android/guava/src/com/google/common/collect/FluentIterable.java
| 801
|
[
"collection"
] |
C
| true
| 2
| 7.44
|
google/guava
| 51,352
|
javadoc
| false
|
joining
|
public static Collector<Object, ?, String> joining(final CharSequence delimiter, final CharSequence prefix, final CharSequence suffix) {
return joining(delimiter, prefix, suffix, Objects::toString);
}
|
Returns a {@code Collector} that concatenates the input elements, separated by the specified delimiter, with the
specified prefix and suffix, in encounter order.
<p>
This is a variation of {@link Collectors#joining(CharSequence, CharSequence, CharSequence)} that works with any
element class, not just {@code CharSequence}.
</p>
<p>
For example:
</p>
<pre>
Stream.of(Long.valueOf(1), Long.valueOf(2), Long.valueOf(3))
.collect(LangCollectors.joining("-", "[", "]"))
returns "[1-2-3]"
</pre>
@param delimiter the delimiter to be used between each element
@param prefix the sequence of characters to be used at the beginning of the joined result
@param suffix the sequence of characters to be used at the end of the joined result
@return A {@code Collector} which concatenates CharSequence elements, separated by the specified delimiter, in
encounter order
|
java
|
src/main/java/org/apache/commons/lang3/stream/LangCollectors.java
| 181
|
[
"delimiter",
"prefix",
"suffix"
] | true
| 1
| 6.16
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
count
|
def count(self, numeric_only: bool = False):
"""
Calculate the rolling count of non-NaN observations.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.rolling : Calling rolling with Series data.
DataFrame.rolling : Calling rolling with DataFrames.
Series.count : Aggregating count for Series.
DataFrame.count : Aggregating count for DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 NaN
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 NaN
1 NaN
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 NaN
1 NaN
2 NaN
3 3.0
dtype: float64
"""
return super().count(numeric_only)
|
Calculate the rolling count of non-NaN observations.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.rolling : Calling rolling with Series data.
DataFrame.rolling : Calling rolling with DataFrames.
Series.count : Aggregating count for Series.
DataFrame.count : Aggregating count for DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 NaN
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 NaN
1 NaN
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 NaN
1 NaN
2 NaN
3 3.0
dtype: float64
|
python
|
pandas/core/window/rolling.py
| 2,122
|
[
"self",
"numeric_only"
] | true
| 1
| 7.28
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
is_keys_unchanged
|
def is_keys_unchanged(self, current_objects: set[str]) -> bool:
"""
Check for new objects after the inactivity_period and update the sensor state accordingly.
:param current_objects: set of object ids in bucket during last poke.
"""
current_num_objects = len(current_objects)
if current_objects > self.previous_objects:
# When new objects arrived, reset the inactivity_seconds
# and update previous_objects for the next poke.
self.log.info(
"New objects found at %s, resetting last_activity_time.",
os.path.join(self.bucket_name, self.prefix),
)
self.log.debug("New objects: %s", current_objects - self.previous_objects)
self.last_activity_time = datetime.now()
self.inactivity_seconds = 0
self.previous_objects = current_objects
return False
if self.previous_objects - current_objects:
# During the last poke interval objects were deleted.
if self.allow_delete:
deleted_objects = self.previous_objects - current_objects
self.previous_objects = current_objects
self.last_activity_time = datetime.now()
self.log.info(
"Objects were deleted during the last poke interval. Updating the "
"file counter and resetting last_activity_time:\n%s",
deleted_objects,
)
return False
raise AirflowException(
f"Illegal behavior: objects were deleted in {os.path.join(self.bucket_name, self.prefix)} between pokes."
)
if self.last_activity_time:
self.inactivity_seconds = int((datetime.now() - self.last_activity_time).total_seconds())
else:
# Handles the first poke where last inactivity time is None.
self.last_activity_time = datetime.now()
self.inactivity_seconds = 0
if self.inactivity_seconds >= self.inactivity_period:
path = os.path.join(self.bucket_name, self.prefix)
if current_num_objects >= self.min_objects:
self.log.info(
"SUCCESS: \nSensor found %s objects at %s.\n"
"Waited at least %s seconds, with no new objects uploaded.",
current_num_objects,
path,
self.inactivity_period,
)
return True
self.log.error("FAILURE: Inactivity Period passed, not enough objects found in %s", path)
return False
return False
|
Check for new objects after the inactivity_period and update the sensor state accordingly.
:param current_objects: set of object ids in bucket during last poke.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/s3.py
| 303
|
[
"self",
"current_objects"
] |
bool
| true
| 8
| 6.96
|
apache/airflow
| 43,597
|
sphinx
| false
|
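The decision logic above condenses to a pure function; a hedged Python restatement (hypothetical signature, not the Airflow API; the real method also resets timestamps and logs):

def keys_unchanged(previous, current, inactivity_seconds, inactivity_period,
                   min_objects, allow_delete):
    if previous - current and not allow_delete:
        raise RuntimeError("objects were deleted between pokes")
    if current != previous:
        return False                      # activity seen: the sensor resets its clock
    if inactivity_seconds < inactivity_period:
        return False                      # unchanged, but not quiet for long enough
    return len(current) >= min_objects    # quiet long enough; enough objects found?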
contains
|
@Override
boolean contains(@Nullable Object element);
|
Determines whether this multiset contains the specified element.
<p>This method refines {@link Collection#contains} to further specify that it <b>may not</b>
throw an exception in response to {@code element} being null or of the wrong type.
@param element the element to check for
@return {@code true} if this multiset contains at least one occurrence of the element
|
java
|
android/guava/src/com/google/common/collect/Multiset.java
| 388
|
[
"element"
] | true
| 1
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
|
add
|
protected void add(char[] cbuf, int off, int len) throws IOException {
int pos = off;
if (sawReturn && len > 0) {
// Last call to add ended with a CR; we can handle the line now.
if (finishLine(cbuf[pos] == '\n')) {
pos++;
}
}
int start = pos;
for (int end = off + len; pos < end; pos++) {
switch (cbuf[pos]) {
case '\r':
line.append(cbuf, start, pos - start);
sawReturn = true;
if (pos + 1 < end) {
if (finishLine(cbuf[pos + 1] == '\n')) {
pos++;
}
}
start = pos + 1;
break;
case '\n':
line.append(cbuf, start, pos - start);
finishLine(true);
start = pos + 1;
break;
default:
// do nothing
}
}
line.append(cbuf, start, off + len - start);
}
|
Process additional characters from the stream. When a line separator is found the contents of
the line and the line separator itself are passed to the abstract {@link #handleLine} method.
@param cbuf the character buffer to process
@param off the offset into the buffer
@param len the number of characters to process
@throws IOException if an I/O error occurs
@see #finish
|
java
|
android/guava/src/com/google/common/io/LineBuffer.java
| 52
|
[
"cbuf",
"off",
"len"
] |
void
| true
| 7
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
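The subtle case add() handles is a CR arriving at the end of one buffer whose LF (if any) only shows up in the next call. A small Python sketch of the same state machine (illustrative, not the Guava API):

def split_lines(chunks):
    line, saw_cr, out = "", False, []
    for chunk in chunks:
        for ch in chunk:
            if saw_cr:                 # previous char was CR: the line is complete
                out.append(line)
                line, saw_cr = "", False
                if ch == "\n":         # swallow the LF of a split CRLF pair
                    continue
            if ch == "\r":
                saw_cr = True
            elif ch == "\n":
                out.append(line)
                line = ""
            else:
                line += ch
    if saw_cr or line:                 # flush a trailing CR-terminated or partial line
        out.append(line)
    return out

split_lines(["a\r", "\nb\n", "c"])     # -> ["a", "b", "c"]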
generateCodeForInaccessibleConstructor
|
private CodeBlock generateCodeForInaccessibleConstructor(ConstructorDescriptor descriptor,
Consumer<ReflectionHints> hints) {
Constructor<?> constructor = descriptor.constructor();
CodeWarnings codeWarnings = new CodeWarnings();
codeWarnings.detectDeprecation(constructor.getDeclaringClass(), constructor)
.detectDeprecation(Arrays.stream(constructor.getParameters()).map(Parameter::getType));
hints.accept(this.generationContext.getRuntimeHints().reflection());
GeneratedMethod generatedMethod = generateGetInstanceSupplierMethod(method -> {
method.addJavadoc("Get the bean instance supplier for '$L'.", descriptor.beanName());
method.addModifiers(PRIVATE_STATIC);
codeWarnings.suppress(method);
method.returns(ParameterizedTypeName.get(BeanInstanceSupplier.class, descriptor.publicType()));
method.addStatement(generateResolverForConstructor(descriptor));
});
return generateReturnStatement(generatedMethod);
}
|
Generate the instance supplier code.
@param descriptor the descriptor of the constructor to use to create the bean
@param hints a consumer of the reflection hints to register for the constructor
@return the generated code
@since 6.1.7
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
| 198
|
[
"descriptor",
"hints"
] |
CodeBlock
| true
| 1
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
doBackward
|
@ForOverride
protected abstract A doBackward(B b);
|
Returns a representation of {@code b} as an instance of type {@code A}. If {@code b} cannot be
converted, an unchecked exception (such as {@link IllegalArgumentException}) should be thrown.
@param b the instance to convert; will never be null
@return the converted instance; <b>must not</b> be null
@throws UnsupportedOperationException if backward conversion is not implemented; this should be
very rare. Note that if backward conversion is not only unimplemented but
unimplement<i>able</i> (for example, consider a {@code Converter<Chicken, ChickenNugget>}),
then this is not logically a {@code Converter} at all, and should just implement {@link
Function}.
|
java
|
android/guava/src/com/google/common/base/Converter.java
| 186
|
[
"b"
] |
A
| true
| 1
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
distance
|
private static int distance(final Class<?>[] fromClassArray, final Class<?>[] toClassArray) {
int answer = 0;
if (!ClassUtils.isAssignable(fromClassArray, toClassArray, true)) {
return -1;
}
for (int offset = 0; offset < fromClassArray.length; offset++) {
// Note InheritanceUtils.distance() uses different scoring system.
final Class<?> aClass = fromClassArray[offset];
final Class<?> toClass = toClassArray[offset];
if (aClass == null || aClass.equals(toClass)) {
continue;
}
if (ClassUtils.isAssignable(aClass, toClass, true) && !ClassUtils.isAssignable(aClass, toClass, false)) {
answer++;
} else {
answer += 2;
}
}
return answer;
}
|
Computes the aggregate number of inheritance hops between assignable argument class types. Returns -1
if the arguments aren't assignable. Fills a specific purpose for getMatchingMethod and is not generalized.
@param fromClassArray the Class array to calculate the distance from.
@param toClassArray the Class array to calculate the distance to.
@return the aggregate number of inheritance hops between assignable argument class types.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/MethodUtils.java
| 80
|
[
"fromClassArray",
"toClassArray"
] | true
| 7
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
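A Python sketch of the same scoring, with assignable(frm, to, autobox) standing in for ClassUtils.isAssignable (hypothetical callback; illustration only):

def distance(from_types, to_types, assignable):
    if not all(assignable(f, t, autobox=True) for f, t in zip(from_types, to_types)):
        return -1
    score = 0
    for f, t in zip(from_types, to_types):
        if f is None or f == t:
            continue
        # assignable only with autoboxing/widening costs 1; anything else costs 2
        wide_only = assignable(f, t, autobox=True) and not assignable(f, t, autobox=False)
        score += 1 if wide_only else 2
    return score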
getTypeArguments
|
private static Map<TypeVariable<?>, Type> getTypeArguments(final ParameterizedType parameterizedType, final Class<?> toClass,
final Map<TypeVariable<?>, Type> subtypeVarAssigns) {
final Class<?> cls = getRawType(parameterizedType);
// make sure they're assignable
if (!isAssignable(cls, toClass)) {
return null;
}
final Type ownerType = parameterizedType.getOwnerType();
final Map<TypeVariable<?>, Type> typeVarAssigns;
if (ownerType instanceof ParameterizedType) {
// get the owner type arguments first
final ParameterizedType parameterizedOwnerType = (ParameterizedType) ownerType;
typeVarAssigns = getTypeArguments(parameterizedOwnerType, getRawType(parameterizedOwnerType), subtypeVarAssigns);
} else {
// no owner, prep the type variable assignments map
typeVarAssigns = subtypeVarAssigns == null ? new HashMap<>() : new HashMap<>(subtypeVarAssigns);
}
// get the subject parameterized type's arguments
final Type[] typeArgs = parameterizedType.getActualTypeArguments();
// and get the corresponding type variables from the raw class
final TypeVariable<?>[] typeParams = cls.getTypeParameters();
// map the arguments to their respective type variables
for (int i = 0; i < typeParams.length; i++) {
final Type typeArg = typeArgs[i];
typeVarAssigns.put(typeParams[i], typeVarAssigns.getOrDefault(typeArg, typeArg));
}
if (toClass.equals(cls)) {
// target class has been reached. Done.
return typeVarAssigns;
}
// walk the inheritance hierarchy until the target class is reached
return getTypeArguments(getClosestParentType(cls, toClass), toClass, typeVarAssigns);
}
|
Gets a map of the type arguments of a parameterized type in the context of {@code toClass}.
@param parameterizedType the parameterized type.
@param toClass the class.
@param subtypeVarAssigns a map with type variables.
@return the {@link Map} with type arguments.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 813
|
[
"parameterizedType",
"toClass",
"subtypeVarAssigns"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
left_shift
|
def left_shift(a, n):
"""
Shift the bits of an integer to the left.
This is the masked array version of `numpy.left_shift`, for details
see that function.
See Also
--------
numpy.left_shift
Examples
--------
Shift with a masked array:
>>> arr = np.ma.array([10, 20, 30], mask=[False, True, False])
>>> np.ma.left_shift(arr, 1)
masked_array(data=[20, --, 60],
mask=[False, True, False],
fill_value=999999)
Large shift:
>>> np.ma.left_shift(10, 10)
masked_array(data=10240,
mask=False,
fill_value=999999)
Shift with a scalar and an array:
>>> scalar = 10
>>> arr = np.ma.array([1, 2, 3], mask=[False, True, False])
>>> np.ma.left_shift(scalar, arr)
masked_array(data=[20, --, 80],
mask=[False, True, False],
fill_value=999999)
"""
m = getmask(a)
if m is nomask:
d = umath.left_shift(filled(a), n)
return masked_array(d)
else:
d = umath.left_shift(filled(a, 0), n)
return masked_array(d, mask=m)
|
Shift the bits of an integer to the left.
This is the masked array version of `numpy.left_shift`, for details
see that function.
See Also
--------
numpy.left_shift
Examples
--------
Shift with a masked array:
>>> arr = np.ma.array([10, 20, 30], mask=[False, True, False])
>>> np.ma.left_shift(arr, 1)
masked_array(data=[20, --, 60],
mask=[False, True, False],
fill_value=999999)
Large shift:
>>> np.ma.left_shift(10, 10)
masked_array(data=10240,
mask=False,
fill_value=999999)
Shift with a scalar and an array:
>>> scalar = 10
>>> arr = np.ma.array([1, 2, 3], mask=[False, True, False])
>>> np.ma.left_shift(scalar, arr)
masked_array(data=[20, --, 80],
mask=[False, True, False],
fill_value=999999)
|
python
|
numpy/ma/core.py
| 7,409
|
[
"a",
"n"
] | false
| 3
| 6.16
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
doGetBundle
|
protected ResourceBundle doGetBundle(String basename, Locale locale) throws MissingResourceException {
ClassLoader classLoader = getBundleClassLoader();
Assert.state(classLoader != null, "No bundle ClassLoader set");
MessageSourceControl control = this.control;
if (control != null) {
try {
return ResourceBundle.getBundle(basename, locale, classLoader, control);
}
catch (UnsupportedOperationException ex) {
// Probably in a Java Module System environment on JDK 9+
this.control = null;
String encoding = getDefaultEncoding();
if (encoding != null && logger.isInfoEnabled()) {
logger.info("ResourceBundleMessageSource is configured to read resources with encoding '" +
encoding + "' but ResourceBundle.Control is not supported in current system environment: " +
ex.getMessage() + " - falling back to plain ResourceBundle.getBundle retrieval with the " +
"platform default encoding. Consider setting the 'defaultEncoding' property to 'null' " +
"for participating in the platform default and therefore avoiding this log message.");
}
}
}
// Fallback: plain getBundle lookup without Control handle
return ResourceBundle.getBundle(basename, locale, classLoader);
}
|
Obtain the resource bundle for the given basename and Locale.
@param basename the basename to look for
@param locale the Locale to look for
@return the corresponding ResourceBundle
@throws MissingResourceException if no matching bundle could be found
@see java.util.ResourceBundle#getBundle(String, Locale, ClassLoader)
@see #getBundleClassLoader()
|
java
|
spring-context/src/main/java/org/springframework/context/support/ResourceBundleMessageSource.java
| 230
|
[
"basename",
"locale"
] |
ResourceBundle
| true
| 5
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
handleApiVersionsResponse
|
private void handleApiVersionsResponse(List<ClientResponse> responses,
InFlightRequest req, long now, ApiVersionsResponse apiVersionsResponse) {
final String node = req.destination;
if (apiVersionsResponse.data().errorCode() != Errors.NONE.code()) {
if (req.request.version() == 0 || apiVersionsResponse.data().errorCode() != Errors.UNSUPPORTED_VERSION.code()) {
log.warn("Received error {} from node {} when making an ApiVersionsRequest with correlation id {}. Disconnecting.",
Errors.forCode(apiVersionsResponse.data().errorCode()), node, req.header.correlationId());
this.selector.close(node);
processDisconnection(responses, node, now, ChannelState.LOCAL_CLOSE);
} else {
// Starting from Apache Kafka 2.4, ApiKeys field is populated with the supported versions of
// the ApiVersionsRequest when an UNSUPPORTED_VERSION error is returned.
// If not provided, the client falls back to version 0.
short maxApiVersion = 0;
if (apiVersionsResponse.data().apiKeys().size() > 0) {
ApiVersion apiVersion = apiVersionsResponse.data().apiKeys().find(ApiKeys.API_VERSIONS.id);
if (apiVersion != null) {
maxApiVersion = apiVersion.maxVersion();
}
}
nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder(maxApiVersion));
}
return;
}
NodeApiVersions nodeVersionInfo = new NodeApiVersions(
apiVersionsResponse.data().apiKeys(),
apiVersionsResponse.data().supportedFeatures(),
apiVersionsResponse.data().finalizedFeatures(),
apiVersionsResponse.data().finalizedFeaturesEpoch());
apiVersions.update(node, nodeVersionInfo);
this.connectionStates.ready(node);
log.debug("Node {} has finalized features epoch: {}, finalized features: {}, supported features: {}, API versions: {}.",
node, apiVersionsResponse.data().finalizedFeaturesEpoch(), apiVersionsResponse.data().finalizedFeatures(),
apiVersionsResponse.data().supportedFeatures(), nodeVersionInfo);
}
|
Handle an ApiVersionsResponse for the given in-flight request: on success, record the node's
supported API versions and mark the connection ready; on an UNSUPPORTED_VERSION error, schedule a
retry with a lower request version; otherwise disconnect.
@param responses The list of responses to update in case of disconnection
@param req The in-flight ApiVersions request
@param now The current time
@param apiVersionsResponse The ApiVersions response to handle
|
java
|
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
| 1,023
|
[
"responses",
"req",
"now",
"apiVersionsResponse"
] |
void
| true
| 6
| 7.04
|
apache/kafka
| 31,560
|
javadoc
| false
|
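The UNSUPPORTED_VERSION fallback above reduces to a small decision; an illustrative Python sketch (plain dicts stand in for the Kafka protocol types; API key 18 is ApiVersions):

API_VERSIONS_KEY = 18

def pick_retry_version(error_code, request_version, advertised_api_keys):
    """Return the ApiVersions request version to retry with, or None to disconnect."""
    if error_code != "UNSUPPORTED_VERSION" or request_version == 0:
        return None                          # not retriable: close the connection
    for key in advertised_api_keys:          # brokers >= 2.4 advertise their range
        if key["id"] == API_VERSIONS_KEY:
            return key["max_version"]
    return 0                                 # older broker: fall back to version 0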
trimmed
|
public ImmutableLongArray trimmed() {
return isPartialView() ? new ImmutableLongArray(toArray()) : this;
}
|
Returns an immutable array containing the same values as {@code this} array. This is logically
a no-op, and in some circumstances {@code this} itself is returned. However, if this instance
is a {@link #subArray} view of a larger array, this method will copy only the appropriate range
of values, resulting in an equivalent array with a smaller memory footprint.
|
java
|
android/guava/src/com/google/common/primitives/ImmutableLongArray.java
| 634
|
[] |
ImmutableLongArray
| true
| 2
| 6.64
|
google/guava
| 51,352
|
javadoc
| false
|
_apply_defaults_to_encoded_op
|
def _apply_defaults_to_encoded_op(
cls,
encoded_op: dict[str, Any],
client_defaults: dict[str, Any] | None = None,
) -> dict[str, Any]:
"""
Apply client defaults to encoded operator before deserialization.
Args:
encoded_op: The serialized operator data (already includes applied default_args)
client_defaults: SDK-specific defaults from client_defaults section
Note: DAG default_args are already applied during task creation in the SDK,
so encoded_op contains the final resolved values.
Hierarchy (lowest to highest priority):
1. client_defaults.tasks (SDK-wide defaults for size optimization)
2. Explicit task values (already in encoded_op, includes applied default_args)
Returns a new dict with defaults merged in.
"""
# Build hierarchy from lowest to highest priority
result = {}
# Level 1: Apply client_defaults.tasks (lowest priority)
# Values are already serialized in generate_client_defaults()
if client_defaults:
task_defaults = client_defaults.get("tasks", {})
result.update(task_defaults)
# Level 2: Apply explicit task values (highest priority - overrides everything)
# Note: encoded_op already contains default_args applied during task creation
result.update(encoded_op)
return result
|
Apply client defaults to encoded operator before deserialization.
Args:
encoded_op: The serialized operator data (already includes applied default_args)
client_defaults: SDK-specific defaults from client_defaults section
Note: DAG default_args are already applied during task creation in the SDK,
so encoded_op contains the final resolved values.
Hierarchy (lowest to highest priority):
1. client_defaults.tasks (SDK-wide defaults for size optimization)
2. Explicit task values (already in encoded_op, includes applied default_args)
Returns a new dict with defaults merged in.
|
python
|
airflow-core/src/airflow/serialization/serialized_objects.py
| 2,050
|
[
"cls",
"encoded_op",
"client_defaults"
] |
dict[str, Any]
| true
| 2
| 6.4
|
apache/airflow
| 43,597
|
google
| false
|
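The two-level priority above reduces to a single dict merge where explicit task values win; an illustrative check with made-up values:

client_defaults = {"tasks": {"retries": 2, "owner": "airflow"}}
encoded_op = {"task_id": "extract", "retries": 5}

merged = {**client_defaults.get("tasks", {}), **encoded_op}   # explicit task values win
assert merged == {"retries": 5, "owner": "airflow", "task_id": "extract"}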
appendln
|
public StrBuilder appendln(final StringBuffer str, final int startIndex, final int length) {
return append(str, startIndex, length).appendNewLine();
}
|
Appends part of a string buffer followed by a new line to this string builder.
Appending null will call {@link #appendNull()}.
@param str the string to append
@param startIndex the start index, inclusive, must be valid
@param length the length to append, must be valid
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,118
|
[
"str",
"startIndex",
"length"
] |
StrBuilder
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
partitionCUs
|
static CUPartitionVector partitionCUs(DWARFContext &DwCtx) {
CUPartitionVector Vec(2);
unsigned Counter = 0;
const DWARFDebugAbbrev *Abbr = DwCtx.getDebugAbbrev();
for (std::unique_ptr<DWARFUnit> &CU : DwCtx.compile_units()) {
Expected<const DWARFAbbreviationDeclarationSet *> AbbrDeclSet =
Abbr->getAbbreviationDeclarationSet(CU->getAbbreviationsOffset());
if (!AbbrDeclSet) {
consumeError(AbbrDeclSet.takeError());
return Vec;
}
bool CrossCURefFound = false;
for (const DWARFAbbreviationDeclaration &Decl : *AbbrDeclSet.get()) {
for (const DWARFAbbreviationDeclaration::AttributeSpec &Attr :
Decl.attributes()) {
if (Attr.Form == dwarf::DW_FORM_ref_addr) {
CrossCURefFound = true;
break;
}
}
if (CrossCURefFound)
break;
}
if (CrossCURefFound) {
Vec[0].push_back(CU.get());
} else {
++Counter;
Vec.back().push_back(CU.get());
}
if (Counter % opts::BatchSize == 0 && !Vec.back().empty())
Vec.push_back({});
}
return Vec;
}
|
Partitions the compile units into batches of opts::BatchSize for processing; CUs whose
abbreviations contain cross-CU references (DW_FORM_ref_addr) as a source are put into the same
initial bucket.
|
cpp
|
bolt/lib/Rewrite/DWARFRewriter.cpp
| 547
|
[] | true
| 8
| 6.88
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
filteredDuration
|
public long filteredDuration() {
return filteredDuration;
}
|
Returns the duration ms value being filtered.
@return the current duration filter value in ms (negative value means transactions are not filtered by duration)
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java
| 112
|
[] | true
| 1
| 6.64
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getOutputPathsForBundle
|
function getOutputPathsForBundle(options: CompilerOptions, forceDtsPaths: boolean): EmitFileNames {
const outPath = options.outFile!;
const jsFilePath = options.emitDeclarationOnly ? undefined : outPath;
const sourceMapFilePath = jsFilePath && getSourceMapFilePath(jsFilePath, options);
const declarationFilePath = (forceDtsPaths || getEmitDeclarations(options)) ? removeFileExtension(outPath) + Extension.Dts : undefined;
const declarationMapPath = declarationFilePath && getAreDeclarationMapsEnabled(options) ? declarationFilePath + ".map" : undefined;
return { jsFilePath, sourceMapFilePath, declarationFilePath, declarationMapPath };
}
|
Computes the output paths (JavaScript, source map, declaration, and declaration map files) for a
bundle emit, based on the `outFile` compiler option.
@param options The compiler options.
@param forceDtsPaths If true, the declaration file path is computed even when declaration emit is not enabled.
@internal
|
typescript
|
src/compiler/emitter.ts
| 506
|
[
"options",
"forceDtsPaths"
] | true
| 7
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
shouldHandle
|
@Contract("_, null -> false")
private boolean shouldHandle(ApplicationEvent event, @Nullable Object @Nullable [] args) {
if (args == null) {
return false;
}
String condition = getCondition();
if (StringUtils.hasText(condition)) {
Assert.notNull(this.evaluator, "EventExpressionEvaluator must not be null");
return this.evaluator.condition(
condition, event, this.targetMethod, this.methodKey, args);
}
return true;
}
|
Determine whether the listener method would actually handle the given
event, checking if the condition matches.
@param event the event to process through the listener method
@param args the resolved method arguments (may be {@code null})
@return {@code true} if the listener should handle the event
@since 6.1
|
java
|
spring-context/src/main/java/org/springframework/context/event/ApplicationListenerMethodAdapter.java
| 272
|
[
"event",
"args"
] | true
| 3
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
topDomainUnderRegistrySuffix
|
public InternetDomainName topDomainUnderRegistrySuffix() {
if (isTopDomainUnderRegistrySuffix()) {
return this;
}
checkState(isUnderRegistrySuffix(), "Not under a registry suffix: %s", name);
return ancestor(registrySuffixIndex() - 1);
}
|
Returns the portion of this domain name that is one level beneath the {@linkplain
#isRegistrySuffix() registry suffix}. For example, for {@code x.adwords.google.co.uk} it
returns {@code google.co.uk}, since {@code co.uk} is a registry suffix. Similarly, for {@code
myblog.blogspot.com} it returns {@code blogspot.com}, since {@code com} is a registry suffix.
<p>If {@link #isTopDomainUnderRegistrySuffix()} is true, the current domain name instance is
returned.
<p><b>Warning:</b> This method should not be used to determine whether a domain is probably the
highest level for which cookies may be set. Use {@link #isTopPrivateDomain()} for that purpose.
@throws IllegalStateException if this domain does not end with a registry suffix
@since 23.3
|
java
|
android/guava/src/com/google/common/net/InternetDomainName.java
| 562
|
[] |
InternetDomainName
| true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
addAll
|
@CanIgnoreReturnValue
@Override
public Builder<E> addAll(Iterable<? extends E> elements) {
if (elements instanceof Multiset) {
for (Entry<? extends E> entry : ((Multiset<? extends E>) elements).entrySet()) {
addCopies(entry.getElement(), entry.getCount());
}
} else {
for (E e : elements) {
add(e);
}
}
return this;
}
|
Adds each element of {@code elements} to the {@code ImmutableSortedMultiset}.
@param elements the {@code Iterable} to add to the {@code ImmutableSortedMultiset}
@return this {@code Builder} object
@throws NullPointerException if {@code elements} is null or contains a null element
|
java
|
android/guava/src/com/google/common/collect/ImmutableSortedMultiset.java
| 637
|
[
"elements"
] | true
| 2
| 7.28
|
google/guava
| 51,352
|
javadoc
| false
|
|
getComponentType
|
public final @Nullable TypeToken<?> getComponentType() {
Type componentType = Types.getComponentType(runtimeType);
if (componentType == null) {
return null;
}
return of(componentType);
}
|
Returns the array component type if this type represents an array ({@code int[]}, {@code T[]},
{@code <? extends Map<String, Integer>[]>} etc.), or else {@code null} is returned.
|
java
|
android/guava/src/com/google/common/reflect/TypeToken.java
| 585
|
[] | true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
batchesFrom
|
public Iterable<FileChannelRecordBatch> batchesFrom(final int start) {
return () -> batchIterator(start);
}
|
Get an iterator over the record batches in the file, starting at a specific position. This is similar to
{@link #batches()} except that callers specify a particular position to start reading the batches from. This
method must be used with caution: the start position passed in must be a known start of a batch.
@param start The position to start record iteration from; must be a known position for start of a batch
@return An iterator over batches starting from {@code start}
|
java
|
clients/src/main/java/org/apache/kafka/common/record/FileRecords.java
| 425
|
[
"start"
] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
containsNarrowableReference
|
function containsNarrowableReference(expr: Expression): boolean {
return isNarrowableReference(expr) || isOptionalChain(expr) && containsNarrowableReference(expr.expression);
}
|
Returns true if the expression is a narrowable reference, or is an optional chain whose target
expression contains a narrowable reference.
@param expr - The expression to test.
|
typescript
|
src/compiler/binder.ts
| 1,289
|
[
"expr"
] | true
| 3
| 6.64
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
maybeSetInitializationError
|
private void maybeSetInitializationError(KafkaException error) {
if (initializationError.compareAndSet(null, error))
return;
log.error("Consumer network thread resource initialization error ({}) will be suppressed as an error was already set", error.getMessage(), error);
}
|
Records an initialization error encountered by the consumer network thread. In certain cases
(e.g. an invalid {@link LoginModule} in {@link SaslConfigs#SASL_JAAS_CONFIG}), an error could be
thrown during {@link #initializeResources()}; only the first such error is kept, and any
subsequent error is logged and suppressed.
@param error The initialization error to record
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
| 175
|
[
"error"
] |
void
| true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
matches
|
boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata);
|
Determine if the condition matches.
@param context the condition context
@param metadata the metadata of the {@link org.springframework.core.type.AnnotationMetadata class}
or {@link org.springframework.core.type.MethodMetadata method} being checked
@return {@code true} if the condition matches and the component can be registered,
or {@code false} to veto the annotated component's registration
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/Condition.java
| 59
|
[
"context",
"metadata"
] | true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
geoToH3
|
public static long geoToH3(double lat, double lng, int res) {
checkResolution(res);
return Vec3d.geoToH3(res, toRadians(lat), toRadians(lng));
}
|
Find the H3 index of the resolution <code>res</code> cell containing the lat/lon (in degrees)
@param lat Latitude in degrees.
@param lng Longitude in degrees.
@param res Resolution, 0 <= res <= 15
@return The H3 index.
@throws IllegalArgumentException latitude, longitude, or resolution are out of range.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/H3.java
| 201
|
[
"lat",
"lng",
"res"
] | true
| 1
| 6.64
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
newReader
|
public static BufferedReader newReader(File file, Charset charset) throws FileNotFoundException {
checkNotNull(file);
checkNotNull(charset);
return new BufferedReader(new InputStreamReader(new FileInputStream(file), charset));
}
|
Returns a buffered reader that reads from a file using the given character set.
<p><b>{@link java.nio.file.Path} equivalent:</b> {@link
java.nio.file.Files#newBufferedReader(java.nio.file.Path, Charset)}.
@param file the file to read from
@param charset the charset used to decode the input stream; see {@link StandardCharsets} for
helpful predefined constants
@return the buffered reader
|
java
|
android/guava/src/com/google/common/io/Files.java
| 86
|
[
"file",
"charset"
] |
BufferedReader
| true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
nonNull
|
public static <E> Stream<E> nonNull(final E array) {
return nonNull(streamOf(array));
}
|
Streams the non-null element.
@param <E> the type of elements in the collection.
@param array the element to stream or null.
@return A non-null stream that filters out a null element.
@since 3.15.0
|
java
|
src/main/java/org/apache/commons/lang3/stream/Streams.java
| 638
|
[
"array"
] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
round
|
def round(self, decimals=0, out=None):
"""
Return each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.ndarray.round : corresponding function for ndarrays
numpy.around : equivalent function
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75],
... mask=[0, 0, 0, 1, 0, 0])
>>> ma.round(x)
masked_array(data=[1.0, 2.0, 2.0, --, 2.0, 3.0],
mask=[False, False, False, True, False, False],
fill_value=1e+20)
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
if result.ndim > 0:
result._mask = self._mask
result._update_from(self)
elif self._mask:
# Return masked when the scalar is masked
result = masked
# No explicit output: we're done
if out is None:
return result
if isinstance(out, MaskedArray):
out.__setmask__(self._mask)
return out
|
Return each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.ndarray.round : corresponding function for ndarrays
numpy.around : equivalent function
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75],
... mask=[0, 0, 0, 1, 0, 0])
>>> ma.round(x)
masked_array(data=[1.0, 2.0, 2.0, --, 2.0, 3.0],
mask=[False, False, False, True, False, False],
fill_value=1e+20)
|
python
|
numpy/ma/core.py
| 5,568
|
[
"self",
"decimals",
"out"
] | false
| 5
| 6.48
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
createIfAbsentUnchecked
|
public static <K, V> V createIfAbsentUnchecked(final ConcurrentMap<K, V> map,
final K key, final ConcurrentInitializer<V> init) {
try {
return createIfAbsent(map, key, init);
} catch (final ConcurrentException cex) {
throw new ConcurrentRuntimeException(cex.getCause());
}
}
|
Checks if a concurrent map contains a key and creates a corresponding
value if not, suppressing checked exceptions. This method calls
{@code createIfAbsent()}. If a {@link ConcurrentException} is thrown, it
is caught and re-thrown as a {@link ConcurrentRuntimeException}.
@param <K> the type of the keys of the map
@param <V> the type of the values of the map
@param map the map to be modified
@param key the key of the value to be added
@param init the {@link ConcurrentInitializer} for creating the value
@return the value stored in the map after this operation; this may or may
not be the object created by the {@link ConcurrentInitializer}
@throws ConcurrentRuntimeException if the initializer throws an exception
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/ConcurrentUtils.java
| 179
|
[
"map",
"key",
"init"
] |
V
| true
| 2
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
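A rough Python analogue of the create-if-absent pattern and the checked-to-unchecked rewrap (names invented; Python lacks checked exceptions, so a generic Exception stands in for ConcurrentException):

def create_if_absent(mapping, key, init):
    value = mapping.get(key)
    if value is None:
        # setdefault keeps the first stored value, like ConcurrentMap.putIfAbsent
        value = mapping.setdefault(key, init())
    return value

def create_if_absent_unchecked(mapping, key, init):
    try:
        return create_if_absent(mapping, key, init)
    except Exception as exc:
        raise RuntimeError(exc) from exc   # rewrap, mirroring ConcurrentRuntimeException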
writeHeader
|
private void writeHeader() throws IOException {
ByteUtils.writeUnsignedIntLE(buffer, 0, MAGIC);
bufferOffset = 4;
buffer[bufferOffset++] = flg.toByte();
buffer[bufferOffset++] = bd.toByte();
// TODO write uncompressed content size, update flg.validate()
// compute checksum on all descriptor fields
int offset = 4;
int len = bufferOffset - offset;
if (this.useBrokenFlagDescriptorChecksum) {
len += offset;
offset = 0;
}
byte hash = (byte) ((checksum.hash(buffer, offset, len, 0) >> 8) & 0xFF);
buffer[bufferOffset++] = hash;
// write out frame descriptor
out.write(buffer, 0, bufferOffset);
bufferOffset = 0;
}
|
Writes the magic number and frame descriptor to the underlying {@link OutputStream}.
@throws IOException if writing to the underlying output stream fails
|
java
|
clients/src/main/java/org/apache/kafka/common/compress/Lz4BlockOutputStream.java
| 120
|
[] |
void
| true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
unicodeEscaped
|
public static String unicodeEscaped(final Character ch) {
return ch != null ? unicodeEscaped(ch.charValue()) : null;
}
|
Converts the character to the Unicode format '\u0020'.
<p>This format is the Java source code format.</p>
<p>If {@code null} is passed in, {@code null} will be returned.</p>
<pre>
CharUtils.unicodeEscaped(null) = null
CharUtils.unicodeEscaped(' ') = "\u0020"
CharUtils.unicodeEscaped('A') = "\u0041"
</pre>
@param ch the character to convert, may be null
@return the escaped Unicode string, null if null input
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 534
|
[
"ch"
] |
String
| true
| 2
| 7.68
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
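The same conversion is a one-liner in Python, shown for comparison (illustrative):

def unicode_escaped(ch):
    return None if ch is None else f"\\u{ord(ch):04x}"

assert unicode_escaped("A") == "\\u0041"
assert unicode_escaped(None) is None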
executeQueryRaw
|
async function executeQueryRaw(executor: Statements, params: SqlQuery): Promise<SqlResultSet> {
const { sql, args } = params
try {
const convertedArgs = convertArgs(args, params.argTypes)
const result = await executor.query(sql, ...convertedArgs)
// Collect all rows - the driver adapter interface requires synchronous access
const rows = await result.rows.collect()
// Map columns with type information
const columnNames = result.columns.map((col) => col.name)
const columnTypes = result.columns.map((col) => fieldToColumnType(col.oid))
return {
columnNames,
columnTypes,
rows: rows.map((row) => row.values),
}
} catch (error) {
throw convertDriverError(error)
}
}
|
Executes a raw SQL query and returns the result set.
|
typescript
|
packages/adapter-ppg/src/ppg.ts
| 205
|
[
"executor",
"params"
] | true
| 2
| 6
|
prisma/prisma
| 44,834
|
jsdoc
| true
|
|
runKernel
|
int runKernel( InputArray _m1, InputArray _m2, OutputArray _model ) const CV_OVERRIDE
{
Mat m1 = _m1.getMat(), m2 = _m2.getMat();
int i, count = m1.checkVector(2);
const Point2f* M = m1.ptr<Point2f>();
const Point2f* m = m2.ptr<Point2f>();
double LtL[9][9], W[9][1], V[9][9];
Mat _LtL( 9, 9, CV_64F, &LtL[0][0] );
Mat matW( 9, 1, CV_64F, W );
Mat matV( 9, 9, CV_64F, V );
Mat _H0( 3, 3, CV_64F, V[8] );
Mat _Htemp( 3, 3, CV_64F, V[7] );
Point2d cM(0,0), cm(0,0), sM(0,0), sm(0,0);
for( i = 0; i < count; i++ )
{
cm.x += m[i].x; cm.y += m[i].y;
cM.x += M[i].x; cM.y += M[i].y;
}
cm.x /= count;
cm.y /= count;
cM.x /= count;
cM.y /= count;
for( i = 0; i < count; i++ )
{
sm.x += fabs(m[i].x - cm.x);
sm.y += fabs(m[i].y - cm.y);
sM.x += fabs(M[i].x - cM.x);
sM.y += fabs(M[i].y - cM.y);
}
if( fabs(sm.x) < DBL_EPSILON || fabs(sm.y) < DBL_EPSILON ||
fabs(sM.x) < DBL_EPSILON || fabs(sM.y) < DBL_EPSILON )
return 0;
sm.x = count/sm.x; sm.y = count/sm.y;
sM.x = count/sM.x; sM.y = count/sM.y;
double invHnorm[9] = { 1./sm.x, 0, cm.x, 0, 1./sm.y, cm.y, 0, 0, 1 };
double Hnorm2[9] = { sM.x, 0, -cM.x*sM.x, 0, sM.y, -cM.y*sM.y, 0, 0, 1 };
Mat _invHnorm( 3, 3, CV_64FC1, invHnorm );
Mat _Hnorm2( 3, 3, CV_64FC1, Hnorm2 );
_LtL.setTo(Scalar::all(0));
for( i = 0; i < count; i++ )
{
double x = (m[i].x - cm.x)*sm.x, y = (m[i].y - cm.y)*sm.y;
double X = (M[i].x - cM.x)*sM.x, Y = (M[i].y - cM.y)*sM.y;
double Lx[] = { X, Y, 1, 0, 0, 0, -x*X, -x*Y, -x };
double Ly[] = { 0, 0, 0, X, Y, 1, -y*X, -y*Y, -y };
int j, k;
for( j = 0; j < 9; j++ )
for( k = j; k < 9; k++ )
LtL[j][k] += Lx[j]*Lx[k] + Ly[j]*Ly[k];
}
completeSymm( _LtL );
eigen( _LtL, matW, matV );
_Htemp = _invHnorm*_H0;
_H0 = _Htemp*_Hnorm2;
_H0.convertTo(_model, _H0.type(), scaleFor(_H0.at<double>(2,2)));
return 1;
}
|
Normalization method:
- $x$ and $y$ coordinates are normalized independently
- first the coordinates are shifted so that the average coordinate is \f$(0,0)\f$
- then the coordinates are scaled so that the average L1 norm is 1, i.e,
the average L1 norm of the \f$x\f$ coordinates is 1 and the average
L1 norm of the \f$y\f$ coordinates is also 1.
@param _m1 source points containing (X,Y), depth is CV_32F with 1 column 2 channels or
2 columns 1 channel
@param _m2 destination points containing (x,y), depth is CV_32F with 1 column 2 channels or
2 columns 1 channel
@param _model CV_64FC1, 3x3, normalized, i.e., the last element is 1
|
cpp
|
modules/calib3d/src/fundam.cpp
| 125
|
[
"_m1",
"_m2",
"_model"
] | true
| 10
| 6.4
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
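The normalization runKernel describes (centroid to the origin, then scale so the average L1 spread per axis is 1) restated in a few lines of NumPy, illustrative rather than OpenCV code:

import numpy as np

def normalize_points(pts):
    c = pts.mean(axis=0)                 # shift centroid to (0, 0)
    s = np.abs(pts - c).mean(axis=0)     # average |x - cx| and |y - cy|
    # the C++ code bails out when either spread is ~0; assume non-degenerate input here
    return (pts - c) / s, c, 1.0 / s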
validate_parse_dates_presence
|
def validate_parse_dates_presence(
parse_dates: bool | list, columns: Sequence[Hashable]
) -> set:
"""
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the dataframe.
Returns
-------
The names of the columns which will get parsed later if a list
is given as specification.
Raises
------
ValueError
If a column to parse dates from is not in the dataframe.
"""
if not isinstance(parse_dates, list):
return set()
missing = set()
unique_cols = set()
for col in parse_dates:
if isinstance(col, str):
if col not in columns:
missing.add(col)
else:
unique_cols.add(col)
elif col in columns:
unique_cols.add(col)
else:
unique_cols.add(columns[col])
if missing:
missing_cols = ", ".join(sorted(missing))
raise ValueError(f"Missing column provided to 'parse_dates': '{missing_cols}'")
return unique_cols
|
Check if parse_dates are in columns.
If user has provided names for parse_dates, check if those columns
are available.
Parameters
----------
columns : list
List of names of the dataframe.
Returns
-------
The names of the columns which will get parsed later if a list
is given as specification.
Raises
------
ValueError
If a column to parse dates from is not in the dataframe.
|
python
|
pandas/io/parsers/base_parser.py
| 875
|
[
"parse_dates",
"columns"
] |
set
| true
| 9
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
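Illustrative calls against the function above (made-up column names):

cols = ["date", "value"]

validate_parse_dates_presence(["date"], cols)     # -> {"date"}
validate_parse_dates_presence(True, cols)         # -> set(): not a list specification
validate_parse_dates_presence(["missing"], cols)  # raises ValueError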
Program
|
Program(
const Context& context,
const VECTOR_CLASS<Device>& devices,
const STRING_CLASS& kernelNames,
cl_int* err = NULL)
{
cl_int error;
::size_t numDevices = devices.size();
cl_device_id* deviceIDs = (cl_device_id*) alloca(numDevices * sizeof(cl_device_id));
for( ::size_t deviceIndex = 0; deviceIndex < numDevices; ++deviceIndex ) {
deviceIDs[deviceIndex] = (devices[deviceIndex])();
}
object_ = ::clCreateProgramWithBuiltInKernels(
context(),
(cl_uint) devices.size(),
deviceIDs,
kernelNames.c_str(),
&error);
detail::errHandler(error, __CREATE_PROGRAM_WITH_BUILT_IN_KERNELS_ERR);
if (err != NULL) {
*err = error;
}
}
|
Create program using builtin kernels.
\param kernelNames Semi-colon separated list of builtin kernel names
|
cpp
|
3rdparty/include/opencl/1.2/CL/cl.hpp
| 4,779
|
[
"context",
"devices",
"kernelNames",
"err"
] | true
| 3
| 6.24
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
_set_categories
|
def _set_categories(self, categories, fastpath: bool = False) -> None:
"""
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(["a", "b"])
>>> c
['a', 'b']
Categories (2, str): ['a', 'b']
>>> c._set_categories(pd.Index(["a", "c"]))
>>> c
['a', 'c']
Categories (2, str): ['a', 'c']
"""
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (
not fastpath
and self.dtype.categories is not None
and len(new_dtype.categories) != len(self.dtype.categories)
):
raise ValueError(
"new categories need to have the same number of "
"items as the old categories!"
)
super().__init__(self._ndarray, new_dtype)
|
Sets new categories inplace
Parameters
----------
fastpath : bool, default False
Don't perform validation of the categories for uniqueness or nulls
Examples
--------
>>> c = pd.Categorical(["a", "b"])
>>> c
['a', 'b']
Categories (2, str): ['a', 'b']
>>> c._set_categories(pd.Index(["a", "c"]))
>>> c
['a', 'c']
Categories (2, str): ['a', 'c']
|
python
|
pandas/core/arrays/categorical.py
| 939
|
[
"self",
"categories",
"fastpath"
] |
None
| true
| 6
| 7.52
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
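Illustrative contrast between this private fastpath and the public API (a sketch; _set_categories is internal and its behavior can differ across pandas versions). Because it keeps the codes and only swaps labels, it behaves like a positional rename:

import pandas as pd

c = pd.Categorical(["a", "b", "a"])
c._set_categories(["x", "y"])  # positional relabel: codes stay, labels change
d = pd.Categorical(["a", "b", "a"]).rename_categories({"a": "x", "b": "y"})
assert list(c) == list(d) == ["x", "y", "x"]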
log_event_end
|
def log_event_end(
self,
event_name: str,
time_ns: int,
metadata: dict[str, Any],
start_time_ns: int,
log_pt2_compile_event: bool,
compile_id: Optional[CompileId] = None,
) -> None:
"""
Logs the end of a single event. This function should only be
called after log_event_start with the same event_name.
:param event_name: Name of event to appear in trace
:param time_ns: Timestamp in nanoseconds
:param metadata: Any extra metadata associated with this event
:param start_time_ns: The start time timestamp in nanoseconds
:param log_pt2_compile_event: If True, log to pt2_compile_events
:param compile_id: Explicit compile_id (rather than using the current context)
"""
compile_id = compile_id or torch._guards.CompileContext.current_compile_id()
metadata["compile_id"] = str(compile_id)
# Grab metadata collected during event span
all_event_data = self.get_event_data()
if event_name in all_event_data:
event_metadata = all_event_data[event_name]
del all_event_data[event_name]
else:
event_metadata = {}
# Add the passed in metadata
event_metadata.update(metadata)
event = self._log_timed_event(
event_name,
time_ns,
"E",
event_metadata,
)
def pop_stack(stack: list[str]) -> None:
while event_name != stack[-1]:
# If the event isn't the most recent one to end, pop
# off the stack until it is.
# Since event_name in self.stack, this pop is always safe
log.warning(
"ChromiumEventLogger: Detected overlapping events, fixing stack"
)
stack.pop()
event_stack = self.get_stack()
# These stack health checks currently never happen,
# but they're written this way to future proof any weird event
# overlaps in the future.
if event_name not in event_stack:
# Something went wrong, we never called start on this event,
# or it was skipped due to overlapping events below
log.warning("ChromiumEventLogger: Start event not in stack, ignoring")
return
pop_stack(event_stack)
if log_pt2_compile_event:
pt2_compile_substack = self.get_pt2_compile_substack()
pop_stack(pt2_compile_substack)
log_chromium_event_internal(
event, pt2_compile_substack, self.id_, start_time_ns
)
# Pop actual event off of stack
pt2_compile_substack.pop()
# Finally pop the actual event off the stack
event_stack.pop()
|
Logs the end of a single event. This function should only be
called after log_event_start with the same event_name.
:param event_name: Name of event to appear in trace
:param time_ns: Timestamp in nanoseconds
:param metadata: Any extra metadata associated with this event
:param start_time_ns: The start time timestamp in nanoseconds
:param log_pt2_compile_event: If True, log to pt2_compile_events
:param compile_id: Explicit compile_id (rather than using the current context)
|
python
|
torch/_dynamo/utils.py
| 1,960
|
[
"self",
"event_name",
"time_ns",
"metadata",
"start_time_ns",
"log_pt2_compile_event",
"compile_id"
] |
None
| true
| 7
| 6.64
|
pytorch/pytorch
| 96,034
|
sphinx
| false
|
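The stack-repair step above is self-contained enough to sketch standalone. This mirrors the nested pop_stack helper, with event_name passed explicitly rather than captured from the enclosing scope:

import logging

log = logging.getLogger(__name__)

def pop_stack(stack: list[str], event_name: str) -> None:
    # Pop newer entries until the ending event is on top; assumes
    # event_name is known to be somewhere in the stack.
    while event_name != stack[-1]:
        log.warning("Detected overlapping events, fixing stack")
        stack.pop()

events = ["compile", "inductor", "triton"]
pop_stack(events, "inductor")
assert events == ["compile", "inductor"]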
max
|
@ParametricNullness
public static <T extends @Nullable Object> T max(
@ParametricNullness T a, @ParametricNullness T b, Comparator<? super T> comparator) {
return (comparator.compare(a, b) >= 0) ? a : b;
}
|
Returns the maximum of the two values, according to the given comparator. If the values compare
as equal, the first is returned.
<p>The recommended solution for finding the {@code maximum} of some values depends on the type
of your data and the number of elements you have. Read more in the Guava User Guide article on
<a href="https://github.com/google/guava/wiki/CollectionUtilitiesExplained#comparators">{@code
Comparators}</a>.
@param a first value to compare, returned if greater than or equal to b.
@param b second value to compare.
@throws ClassCastException if the parameters are not <i>mutually comparable</i> using the given
comparator.
@since 30.0
|
java
|
android/guava/src/com/google/common/collect/Comparators.java
| 279
|
[
"a",
"b",
"comparator"
] |
T
| true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
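A quick Python analogue of the tie rule above (on an equal comparison the first argument wins), useful when porting code that relies on this stability; this is not Guava's code:

def max_by(a, b, compare):
    # compare(a, b) >= 0 mirrors comparator.compare(a, b) >= 0 in the Java source.
    return a if compare(a, b) >= 0 else b

cmp_ci = lambda x, y: (x.lower() > y.lower()) - (x.lower() < y.lower())
assert max_by("Apple", "APPLE", cmp_ci) == "Apple"  # equal -> first returned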
difference
|
public static String difference(final String str1, final String str2) {
if (str1 == null) {
return str2;
}
if (str2 == null) {
return str1;
}
final int at = indexOfDifference(str1, str2);
if (at == INDEX_NOT_FOUND) {
return EMPTY;
}
return str2.substring(at);
}
|
Compares two Strings, and returns the portion where they differ. More precisely, return the remainder of the second String, starting from where it's
different from the first. This means that the difference between "abc" and "ab" is the empty String and not "c".
<p>
For example, {@code difference("i am a machine", "i am a robot") -> "robot"}.
</p>
<pre>
StringUtils.difference(null, null) = null
StringUtils.difference("", "") = ""
StringUtils.difference("", "abc") = "abc"
StringUtils.difference("abc", "") = ""
StringUtils.difference("abc", "abc") = ""
StringUtils.difference("abc", "ab") = ""
StringUtils.difference("ab", "abxyz") = "xyz"
StringUtils.difference("abcde", "abxyz") = "xyz"
StringUtils.difference("abcde", "xyz") = "xyz"
</pre>
@param str1 the first String, may be null.
@param str2 the second String, may be null.
@return the portion of str2 where it differs from str1; returns the empty String if they are equal.
@see #indexOfDifference(CharSequence,CharSequence)
@since 2.0
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,664
|
[
"str1",
"str2"
] |
String
| true
| 4
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
undefinedDependentConfigs
|
private List<String> undefinedDependentConfigs() {
Set<String> undefinedConfigKeys = new HashSet<>();
for (ConfigKey configKey : configKeys.values()) {
for (String dependent: configKey.dependents) {
if (!configKeys.containsKey(dependent)) {
undefinedConfigKeys.add(dependent);
}
}
}
return new ArrayList<>(undefinedConfigKeys);
}
|
Collects dependent configuration names that are referenced by one or more
defined config keys but are not themselves defined.
@return list of undefined dependent configuration names.
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 603
|
[] | true
| 2
| 7.44
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
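Language-neutral sketch of the same check (illustration only; the dict layout here is hypothetical, not Kafka's ConfigKey): collect every dependent that some defined key references but that is never defined itself.

config_keys = {
    "a": {"dependents": ["b", "ghost"]},
    "b": {"dependents": []},
}
undefined = sorted({dep
                    for key in config_keys.values()
                    for dep in key["dependents"]
                    if dep not in config_keys})
assert undefined == ["ghost"]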
getCandidateConfigurations
|
protected List<String> getCandidateConfigurations(AnnotationMetadata metadata,
@Nullable AnnotationAttributes attributes) {
ImportCandidates importCandidates = ImportCandidates.load(this.autoConfigurationAnnotation,
getBeanClassLoader());
List<String> configurations = importCandidates.getCandidates();
Assert.state(!CollectionUtils.isEmpty(configurations),
"No auto configuration classes found in " + "META-INF/spring/"
+ this.autoConfigurationAnnotation.getName() + ".imports. If you "
+ "are using a custom packaging, make sure that file is correct.");
return configurations;
}
|
Return the auto-configuration class names that should be considered. By default,
this method will load candidates using {@link ImportCandidates}.
@param metadata the source metadata
@param attributes the {@link #getAttributes(AnnotationMetadata) annotation
attributes}
@return a list of candidate configurations
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationImportSelector.java
| 200
|
[
"metadata",
"attributes"
] | true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
_versioned_config
|
def _versioned_config(
jk_name: str,
this_version: int,
oss_default: bool,
env_var_override: str | None = None,
) -> bool:
"""
A versioned configuration utility that determines boolean settings based on:
1. Environment variable override (highest priority)
2. JustKnobs version comparison in fbcode environments
3. OSS default fallback
This function enables gradual rollouts of features in fbcode by comparing
a local version against a JustKnobs-controlled remote version, while
allowing environment variable overrides for testing and OSS defaults
for non-fbcode environments.
Args:
jk_name: JustKnobs key name (e.g., "pytorch/inductor:feature_version")
this_version: Local version number to compare against JustKnobs version
oss_default: Default value to use in non-fbcode environments
env_var_override: Optional environment variable name that, when set,
overrides all other logic
Returns:
bool: Configuration value determined by the priority order above
"""
if (
env_var_override
and (env_var_value := os.environ.get(env_var_override)) is not None
):
return env_var_value == "1"
elif is_fbcode():
# this method returns 0 on failure, which we should check for specifically.
# in the case of JK failure, the safe bet is to simply disable the config
jk_version: int = torch._utils_internal.justknobs_getval_int(jk_name)
return (this_version >= jk_version) and (jk_version != 0)
return oss_default
|
A versioned configuration utility that determines boolean settings based on:
1. Environment variable override (highest priority)
2. JustKnobs version comparison in fbcode environments
3. OSS default fallback
This function enables gradual rollouts of features in fbcode by comparing
a local version against a JustKnobs-controlled remote version, while
allowing environment variable overrides for testing and OSS defaults
for non-fbcode environments.
Args:
jk_name: JustKnobs key name (e.g., "pytorch/inductor:feature_version")
this_version: Local version number to compare against JustKnobs version
oss_default: Default value to use in non-fbcode environments
env_var_override: Optional environment variable name that, when set,
overrides all other logic
Returns:
bool: Configuration value determined by the priority order above
|
python
|
torch/_inductor/runtime/caching/config.py
| 17
|
[
"jk_name",
"this_version",
"oss_default",
"env_var_override"
] |
bool
| true
| 5
| 7.6
|
pytorch/pytorch
| 96,034
|
google
| false
|
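Hypothetical walk-through of the priority order, assuming the function is importable from the file path listed above and that is_fbcode() is False in this environment:

import os
from torch._inductor.runtime.caching.config import _versioned_config

os.environ["MY_FEATURE"] = "1"  # hypothetical override variable
assert _versioned_config("pytorch/inductor:feature_version", 3,
                         oss_default=False, env_var_override="MY_FEATURE")

del os.environ["MY_FEATURE"]
# With no override and outside fbcode, the OSS default is returned.
assert _versioned_config("pytorch/inductor:feature_version", 3,
                         oss_default=False) is False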
compile_time_record_function
|
def compile_time_record_function(name: str) -> Generator[Any, None, None]:
"""
A context manager for compile-time profiling that uses _RecordFunctionFast
for lower overhead than torch.profiler.record_function.
This is intended for use during compilation (dynamo, inductor, etc.) where
we want profiling support but with minimal overhead. Moreover, we do not
want the record_function call inside torch.compile to be dispatched.
Args:
name: The name of the record function event that will appear in profiles.
"""
if torch.autograd.profiler._is_profiler_enabled:
rf = torch._C._profiler._RecordFunctionFast(name)
rf.__enter__()
try:
yield
finally:
rf.__exit__(None, None, None)
else:
yield
|
A context manager for compile-time profiling that uses _RecordFunctionFast
for lower overhead than torch.profiler.record_function.
This is intended for use during compilation (dynamo, inductor, etc.) where
we want profiling support but with minimal overhead. Moreover, we do not
want the record_function call inside torch.compile to be dispatched.
Args:
name: The name of the record function event that will appear in profiles.
|
python
|
torch/_dynamo/utils.py
| 669
|
[
"name"
] |
Generator[Any, None, None]
| true
| 3
| 6.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
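Usage sketch; note the extracted snippet shows a bare generator, while the PyTorch source appears to wrap it with contextlib.contextmanager, which is what makes it usable in a with-statement. The event name here is hypothetical:

import torch
from torch._dynamo.utils import compile_time_record_function

with torch.profiler.profile() as prof:
    with compile_time_record_function("my_compile_phase"):
        torch.randn(8, 8) @ torch.randn(8, 8)
print(prof.key_averages())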
instantiate
|
private Object instantiate(RegisteredBean registeredBean, Executable executable, @Nullable Object[] args) {
if (executable instanceof Constructor<?> constructor) {
if (registeredBean.getBeanFactory() instanceof DefaultListableBeanFactory dlbf &&
registeredBean.getMergedBeanDefinition().hasMethodOverrides()) {
return dlbf.getInstantiationStrategy().instantiate(registeredBean.getMergedBeanDefinition(),
registeredBean.getBeanName(), registeredBean.getBeanFactory());
}
return BeanUtils.instantiateClass(constructor, args);
}
if (executable instanceof Method method) {
Object target = null;
String factoryBeanName = registeredBean.getMergedBeanDefinition().getFactoryBeanName();
if (factoryBeanName != null) {
target = registeredBean.getBeanFactory().getBean(factoryBeanName, method.getDeclaringClass());
}
else if (!Modifier.isStatic(method.getModifiers())) {
throw new IllegalStateException("Cannot invoke instance method without factoryBeanName: " + method);
}
try {
ReflectionUtils.makeAccessible(method);
return method.invoke(target, args);
}
catch (Throwable ex) {
throw new BeanInstantiationException(method, ex.getMessage(), ex);
}
}
throw new IllegalStateException("Unsupported executable " + executable.getClass().getName());
}
|
Instantiate the bean for the specified registered bean, invoking the
resolved executable (constructor or factory method) with the given arguments.
@param registeredBean the registered bean
@param executable the resolved constructor or factory method
@param args the resolved arguments, may be {@code null}
@return the new bean instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanInstanceSupplier.java
| 345
|
[
"registeredBean",
"executable",
"args"
] |
Object
| true
| 8
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
isReusableVariableDeclaration
|
function isReusableVariableDeclaration(node: Node) {
if (node.kind !== SyntaxKind.VariableDeclaration) {
return false;
}
// Very subtle incremental parsing bug. Consider the following code:
//
// let v = new List < A, B
//
// This is actually legal code. It's a list of variable declarators "v = new List<A"
// on one side and "B" on the other. If you then change that to:
//
// let v = new List < A, B >()
//
// then we have a problem. "v = new List<A" doesn't intersect the change range, so we
// start reparsing at "B" and we completely fail to handle this properly.
//
// In order to prevent this, we do not allow a variable declarator to be reused if it
// has an initializer.
const variableDeclarator = node as VariableDeclaration;
return variableDeclarator.initializer === undefined;
}
|
Determines whether a variable declaration node can be reused during
incremental parsing. Declarators with an initializer are never reused,
guarding against the "let v = new List < A, B" reparse hazard described
in the comment inside the function body.
@param node Node to test for reuse.
|
typescript
|
src/compiler/parser.ts
| 3,376
|
[
"node"
] | false
| 2
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
addFormatters
|
private void addFormatters(DateTimeFormatters dateTimeFormatters) {
addFormatterForFieldAnnotation(new NumberFormatAnnotationFormatterFactory());
if (JSR_354_PRESENT) {
addFormatter(new CurrencyUnitFormatter());
addFormatter(new MonetaryAmountFormatter());
addFormatterForFieldAnnotation(new Jsr354NumberFormatAnnotationFormatterFactory());
}
registerJsr310(dateTimeFormatters);
registerJavaDate(dateTimeFormatters);
}
|
Register the default formatters: number-format annotation support, JSR-354
money formatters when the javax.money API is present, and JSR-310 plus
legacy java.util.Date formatters configured from the given formatters.
@param dateTimeFormatters the formatters to use for date, time, and date-time
formatting
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/format/WebConversionService.java
| 65
|
[
"dateTimeFormatters"
] |
void
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|