The preview below contains one record per function. Its columns, with the types and value ranges given in the original preview header, are:

| Column | Type | Observed range / values |
|---|---|---|
| function_name | string | lengths 1–57 |
| function_code | string | lengths 20–4.99k |
| documentation | string | lengths 50–2k |
| language | string | 5 distinct values |
| file_path | string | lengths 8–166 |
| line_number | int32 | 4–16.7k |
| parameters | list | lengths 0–20 |
| return_type | string | lengths 0–131 |
| has_type_hints | bool | 2 classes |
| complexity | int32 | 1–51 |
| quality_score | float32 | 6–9.68 |
| repo_name | string | 34 distinct values |
| repo_stars | int32 | 2.9k–242k |
| docstring_style | string | 7 distinct values |
| is_async | bool | 2 classes |

Each record that follows lists these fields in this order, separated by `|`.
apply
|
public static <O1, O2, O, T extends Throwable> O apply(final FailableBiFunction<O1, O2, O, T> function,
final O1 input1, final O2 input2) {
return get(() -> function.apply(input1, input2));
}
|
Applies a function and rethrows any exception as a {@link RuntimeException}.
@param function the function to apply
@param input1 the first input to apply {@code function} on
@param input2 the second input to apply {@code function} on
@param <O1> the type of the first argument the function accepts
@param <O2> the type of the second argument the function accepts
@param <O> the return type of the function
@param <T> the type of checked exception the function may throw
@return the value returned from the function
|
java
|
src/main/java/org/apache/commons/lang3/Functions.java
| 324
|
[
"function",
"input1",
"input2"
] |
O
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
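A minimal usage sketch for the `Functions.apply` overload in the record above, assuming commons-lang 3 on the classpath; the `concatOrThrow` helper and class name are illustrative only:

```java
import java.io.IOException;
import org.apache.commons.lang3.Functions;

public class FunctionsApplyExample {

    // Illustrative checked-exception-throwing operation.
    static String concatOrThrow(String a, String b) throws IOException {
        if (a == null || b == null) {
            throw new IOException("missing input");
        }
        return a + b;
    }

    public static void main(String[] args) {
        // Functions.apply invokes the FailableBiFunction and rethrows any
        // checked exception it throws as a RuntimeException.
        String joined = Functions.apply(FunctionsApplyExample::concatOrThrow, "foo", "bar");
        System.out.println(joined); // foobar
    }
}
```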
trimMatcher
|
public static StrMatcher trimMatcher() {
return TRIM_MATCHER;
}
|
Gets the matcher to String trim() whitespace characters.
@return the trim matcher.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrMatcher.java
| 372
|
[] |
StrMatcher
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
apply
|
R apply(long input) throws E;
|
Applies this function.
@param input the input for the function
@return the result of the function
@throws E Thrown when the function fails.
|
java
|
src/main/java/org/apache/commons/lang3/function/FailableLongFunction.java
| 55
|
[
"input"
] |
R
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isImplicitlyExcluded
|
private boolean isImplicitlyExcluded(RegisteredBean registeredBean) {
if (Boolean.TRUE.equals(registeredBean.getMergedBeanDefinition()
.getAttribute(BeanRegistrationAotProcessor.IGNORE_REGISTRATION_ATTRIBUTE))) {
return true;
}
Class<?> beanClass = registeredBean.getBeanClass();
if (BeanFactoryInitializationAotProcessor.class.isAssignableFrom(beanClass)) {
return true;
}
if (BeanRegistrationAotProcessor.class.isAssignableFrom(beanClass)) {
BeanRegistrationAotProcessor processor = this.aotProcessors.findByBeanName(registeredBean.getBeanName());
return (processor == null || processor.isBeanExcludedFromAotProcessing());
}
return false;
}
|
Return a {@link BeanDefinitionMethodGenerator} for the given
{@link RegisteredBean} or {@code null} if the registered bean is excluded
by a {@link BeanRegistrationExcludeFilter}. The resulting
{@link BeanDefinitionMethodGenerator} will include all
{@link BeanRegistrationAotProcessor} provided contributions.
@param registeredBean the registered bean
@return a new {@link BeanDefinitionMethodGenerator} instance or {@code null}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanDefinitionMethodGeneratorFactory.java
| 133
|
[
"registeredBean"
] | true
| 5
| 7.12
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
_with_list_option
|
def _with_list_option(self, key):
"""Gets the value at the given self.options[key] as a list.
If the value is not a list, it will be converted to one and saved in self.options.
If the key does not exist, an empty list will be set and returned instead.
Arguments:
key (str): The key to get the value for.
Returns:
List: The value at the given key as a list or an empty list if the key does not exist.
"""
items = self.options.setdefault(key, [])
if not isinstance(items, MutableSequence):
items = self.options[key] = [items]
return items
|
Gets the value at the given self.options[key] as a list.
If the value is not a list, it will be converted to one and saved in self.options.
If the key does not exist, an empty list will be set and returned instead.
Arguments:
key (str): The key to get the value for.
Returns:
List: The value at the given key as a list or an empty list if the key does not exist.
|
python
|
celery/canvas.py
| 684
|
[
"self",
"key"
] | false
| 2
| 7.12
|
celery/celery
| 27,741
|
google
| false
|
|
firePendingCompletedRequests
|
private void firePendingCompletedRequests() {
boolean completedRequestsFired = false;
for (;;) {
RequestFutureCompletionHandler completionHandler = pendingCompletion.poll();
if (completionHandler == null)
break;
completionHandler.fireCompletion();
completedRequestsFired = true;
}
// wakeup the client in case it is blocking in poll for this future's completion
if (completedRequestsFired)
client.wakeup();
}
|
Check whether there is pending request. This includes both requests that
have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
@return A boolean indicating whether there is pending request
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 422
|
[] |
void
| true
| 4
| 8.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
_get_names
|
def _get_names(self) -> FrozenList:
"""
Get names on index.
This method returns a FrozenList containing the names of the object.
It's primarily intended for internal use.
Returns
-------
FrozenList
A FrozenList containing the object's names, contains None if the object
does not have a name.
See Also
--------
Index.name : Index name as a string, or None for MultiIndex.
Examples
--------
>>> idx = pd.Index([1, 2, 3], name="x")
>>> idx.names
FrozenList(['x'])
>>> idx = pd.Index([1, 2, 3], name=("x", "y"))
>>> idx.names
FrozenList([('x', 'y')])
If the index does not have a name set:
>>> idx = pd.Index([1, 2, 3])
>>> idx.names
FrozenList([None])
"""
return FrozenList((self.name,))
|
Get names on index.
This method returns a FrozenList containing the names of the object.
It's primarily intended for internal use.
Returns
-------
FrozenList
A FrozenList containing the object's names, contains None if the object
does not have a name.
See Also
--------
Index.name : Index name as a string, or None for MultiIndex.
Examples
--------
>>> idx = pd.Index([1, 2, 3], name="x")
>>> idx.names
FrozenList(['x'])
>>> idx = pd.Index([1, 2, 3], name=("x", "y"))
>>> idx.names
FrozenList([('x', 'y')])
If the index does not have a name set:
>>> idx = pd.Index([1, 2, 3])
>>> idx.names
FrozenList([None])
|
python
|
pandas/core/indexes/base.py
| 1,885
|
[
"self"
] |
FrozenList
| true
| 1
| 6.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
isValidHeritageClauseObjectLiteral
|
function isValidHeritageClauseObjectLiteral() {
Debug.assert(token() === SyntaxKind.OpenBraceToken);
if (nextToken() === SyntaxKind.CloseBraceToken) {
// if we see "extends {}" then only treat the {} as what we're extending (and not
// the class body) if we have:
//
// extends {} {
// extends {},
// extends {} extends
// extends {} implements
const next = nextToken();
return next === SyntaxKind.CommaToken || next === SyntaxKind.OpenBraceToken || next === SyntaxKind.ExtendsKeyword || next === SyntaxKind.ImplementsKeyword;
}
return true;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 2,945
|
[] | false
| 5
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
castFixed
|
function castFixed(name, func, n) {
if (config.fixed && (forceFixed || !mapping.skipFixed[name])) {
var data = mapping.methodSpread[name],
start = data && data.start;
return start === undefined ? ary(func, n) : flatSpread(func, start);
}
return func;
}
|
Casts `func` to a fixed arity function if needed.
@private
@param {string} name The name of the function to inspect.
@param {Function} func The function to inspect.
@param {number} n The arity cap.
@returns {Function} Returns the cast function.
|
javascript
|
fp/_baseConvert.js
| 315
|
[
"name",
"func",
"n"
] | false
| 6
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
enableAspectJWeaving
|
public static void enableAspectJWeaving(
@Nullable LoadTimeWeaver weaverToUse, @Nullable ClassLoader beanClassLoader) {
if (weaverToUse == null) {
if (InstrumentationLoadTimeWeaver.isInstrumentationAvailable()) {
weaverToUse = new InstrumentationLoadTimeWeaver(beanClassLoader);
}
else {
throw new IllegalStateException("No LoadTimeWeaver available");
}
}
weaverToUse.addTransformer(
new AspectJClassBypassingClassFileTransformer(new ClassPreProcessorAgentAdapter()));
}
|
Enable AspectJ weaving with the given {@link LoadTimeWeaver}.
@param weaverToUse the LoadTimeWeaver to apply to (or {@code null} for a default weaver)
@param beanClassLoader the class loader to create a default weaver for (if necessary)
|
java
|
spring-context/src/main/java/org/springframework/context/weaving/AspectJWeavingEnabler.java
| 84
|
[
"weaverToUse",
"beanClassLoader"
] |
void
| true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getJsDocTagsOfDeclarations
|
function getJsDocTagsOfDeclarations(declarations: Declaration[] | undefined, checker: TypeChecker | undefined): JSDocTagInfo[] {
if (!declarations) return emptyArray;
let tags = JsDoc.getJsDocTagsFromDeclarations(declarations, checker);
if (checker && (tags.length === 0 || declarations.some(hasJSDocInheritDocTag))) {
const seenSymbols = new Set<Symbol>();
for (const declaration of declarations) {
const inheritedTags = findBaseOfDeclaration(checker, declaration, symbol => {
if (!seenSymbols.has(symbol)) {
seenSymbols.add(symbol);
if (declaration.kind === SyntaxKind.GetAccessor || declaration.kind === SyntaxKind.SetAccessor) {
return symbol.getContextualJsDocTags(declaration, checker);
}
return symbol.declarations?.length === 1 ? symbol.getJsDocTags(checker) : undefined;
}
});
if (inheritedTags) {
tags = [...inheritedTags, ...tags];
}
}
}
return tags;
}
|
Returns whether or not the given node has a JSDoc "inheritDoc" tag on it.
@param node the Node in question.
@returns `true` if `node` has a JSDoc "inheritDoc" tag on it, otherwise `false`.
|
typescript
|
src/services/services.ts
| 1,004
|
[
"declarations",
"checker"
] | true
| 10
| 8.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
drainEvents
|
public List<BackgroundEvent> drainEvents() {
List<BackgroundEvent> events = new ArrayList<>();
backgroundEventQueue.drainTo(events);
asyncConsumerMetrics.recordBackgroundEventQueueSize(0);
return events;
}
|
Drain all the {@link BackgroundEvent events} from the handler.
@return A list of {@link BackgroundEvent events} that were drained
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEventHandler.java
| 65
|
[] | true
| 1
| 6.72
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
tryAppendForSplit
|
private boolean tryAppendForSplit(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers, Thunk thunk) {
if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) {
return false;
} else {
// No need to get the CRC.
this.recordsBuilder.append(timestamp, key, value, headers);
this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(),
recordsBuilder.compression().type(), key, value, headers));
FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount,
timestamp,
key == null ? -1 : key.remaining(),
value == null ? -1 : value.remaining(),
Time.SYSTEM);
// Chain the future to the original thunk.
thunk.future.chain(future);
this.thunks.add(thunk);
this.recordCount++;
return true;
}
}
|
This method is only used by {@link #split(int)} when splitting a large batch to smaller ones.
@return true if the record has been successfully appended, false otherwise.
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java
| 169
|
[
"timestamp",
"key",
"value",
"headers",
"thunk"
] | true
| 4
| 7.2
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
jsdocTreatAsExported
|
function jsdocTreatAsExported(node: Node) {
if (node.parent && isModuleDeclaration(node)) {
node = node.parent;
}
if (!isJSDocTypeAlias(node)) return false;
// jsdoc typedef handling is a bit of a doozy, but to summarize, treat the typedef as exported if:
// 1. It has an explicit name (since by default typedefs are always directly exported, either at the top level or in a container), or
if (!isJSDocEnumTag(node) && !!node.fullName) return true;
// 2. The thing a nameless typedef pulls its name from is implicitly a direct export (either by assignment or actual export flag).
const declName = getNameOfDeclaration(node);
if (!declName) return false;
if (isPropertyAccessEntityNameExpression(declName.parent) && isTopLevelNamespaceAssignment(declName.parent)) return true;
if (isDeclaration(declName.parent) && getCombinedModifierFlags(declName.parent) & ModifierFlags.Export) return true;
// This could potentially be simplified by having `delayedBindJSDocTypedefTag` pass in an override for `hasExportModifier`, since it should
// already have calculated and branched on most of this.
return false;
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 931
|
[
"node"
] | false
| 11
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
appendln
|
public StrBuilder appendln(final String format, final Object... objs) {
return append(format, objs).appendNewLine();
}
|
Calls {@link String#format(String, Object...)} and appends the result.
@param format the format string
@param objs the objects to use in the format string
@return {@code this} to enable chaining
@see String#format(String, Object...)
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,092
|
[
"format"
] |
StrBuilder
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
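A short sketch of the formatted `appendln` documented above (commons-lang `StrBuilder`, from the legacy `text` package); the strings and values are illustrative:

```java
import org.apache.commons.lang3.text.StrBuilder;

public class AppendlnFormatExample {
    public static void main(String[] args) {
        StrBuilder sb = new StrBuilder();
        // Formats the arguments via String.format, then appends a new line.
        sb.appendln("user=%s id=%d", "alice", 42);
        sb.appendln("status=%s", "ok");
        System.out.print(sb.toString());
    }
}
```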
get_dag
|
def get_dag(self) -> SerializedDAG:
"""
Return the Dag associated with this DagRun.
:return: DAG
"""
if not self.dag:
raise AirflowException(f"The DAG (.dag) for {self} needs to be set")
return self.dag
|
Return the Dag associated with this DagRun.
:return: DAG
|
python
|
airflow-core/src/airflow/models/dagrun.py
| 932
|
[
"self"
] |
SerializedDAG
| true
| 2
| 6.56
|
apache/airflow
| 43,597
|
unknown
| false
|
run
|
public static ConfigurableApplicationContext run(Class<?> primarySource, String... args) {
return run(new Class<?>[] { primarySource }, args);
}
|
Static helper that can be used to run a {@link SpringApplication} from the
specified source using default settings.
@param primarySource the primary source to load
@param args the application arguments (usually passed from a Java main method)
@return the running {@link ApplicationContext}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 1,353
|
[
"primarySource"
] |
ConfigurableApplicationContext
| true
| 1
| 6.32
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
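A conventional entry point using the static `run` helper documented above; the application class name is illustrative:

```java
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.ConfigurableApplicationContext;

@SpringBootApplication
public class DemoApplication {
    public static void main(String[] args) {
        // Boots the application from a single primary source with default settings
        // and returns the running context.
        ConfigurableApplicationContext ctx = SpringApplication.run(DemoApplication.class, args);
        System.out.println("Started with " + ctx.getBeanDefinitionCount() + " bean definitions");
    }
}
```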
appendln
|
public StrBuilder appendln(final Object obj) {
return append(obj).appendNewLine();
}
|
Appends an object followed by a new line to this string builder.
Appending null will call {@link #appendNull()}.
@param obj the object to append
@return {@code this} instance.
@since 2.3
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,027
|
[
"obj"
] |
StrBuilder
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
saveStateAndInvoke
|
function saveStateAndInvoke<T, U extends Node>(node: U, f: (node: U) => T): T {
// Save state
const savedCurrentScope = currentLexicalScope;
const savedCurrentScopeFirstDeclarationsOfName = currentScopeFirstDeclarationsOfName;
// Handle state changes before visiting a node.
onBeforeVisitNode(node);
const visited = f(node);
// Restore state
if (currentLexicalScope !== savedCurrentScope) {
currentScopeFirstDeclarationsOfName = savedCurrentScopeFirstDeclarationsOfName;
}
currentLexicalScope = savedCurrentScope;
return visited;
}
|
Visits a node, saving and restoring state variables on the stack.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/ts.ts
| 322
|
[
"node",
"f"
] | true
| 2
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
close
|
@Override
public void close() throws IOException {
if (in != null) {
try {
in.close();
} finally {
in = null;
}
}
}
|
Creates a new instance.
@param it an iterator of I/O suppliers that will provide each substream
|
java
|
android/guava/src/com/google/common/io/MultiInputStream.java
| 50
|
[] |
void
| true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
tail
|
function tail(array) {
var length = array == null ? 0 : array.length;
return length ? baseSlice(array, 1, length) : [];
}
|
Gets all but the first element of `array`.
@static
@memberOf _
@since 4.0.0
@category Array
@param {Array} array The array to query.
@returns {Array} Returns the slice of `array`.
@example
_.tail([1, 2, 3]);
// => [2, 3]
|
javascript
|
lodash.js
| 8,242
|
[
"array"
] | false
| 3
| 7.6
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
checkStrictModePrefixUnaryExpression
|
function checkStrictModePrefixUnaryExpression(node: PrefixUnaryExpression) {
// Grammar checking
if (inStrictMode) {
if (node.operator === SyntaxKind.PlusPlusToken || node.operator === SyntaxKind.MinusMinusToken) {
checkStrictModeEvalOrArguments(node, node.operand as Identifier);
}
}
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 2,720
|
[
"node"
] | false
| 4
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
allAsList
|
@SafeVarargs
public static <V extends @Nullable Object> ListenableFuture<List<V>> allAsList(
ListenableFuture<? extends V>... futures) {
ListenableFuture<List<@Nullable V>> nullable =
new ListFuture<V>(ImmutableList.copyOf(futures), true);
// allAsList ensures that it fills the output list with V instances.
@SuppressWarnings("nullness")
ListenableFuture<List<V>> nonNull = nullable;
return nonNull;
}
|
Creates a new {@code ListenableFuture} whose value is a list containing the values of all its
input futures, if all succeed.
<p>The list of results is in the same order as the input list.
<p>This differs from {@link #successfulAsList(ListenableFuture[])} in that it will return a
failed future if any of the items fails.
<p>Canceling this future will attempt to cancel all the component futures, and if any of the
provided futures fails or is canceled, this one is, too.
@param futures futures to combine
@return a future that provides a list of the results of the component futures
@since 10.0
|
java
|
android/guava/src/com/google/common/util/concurrent/Futures.java
| 575
|
[] | true
| 1
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
|
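A small sketch of `allAsList` combining two futures, assuming Guava on the classpath; the tasks are illustrative:

```java
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.concurrent.Executors;

public class AllAsListExample {
    public static void main(String[] args) throws Exception {
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        ListenableFuture<Integer> a = pool.submit(() -> 1);
        ListenableFuture<Integer> b = pool.submit(() -> 2);

        // Fails as a whole if either input future fails; results keep input order.
        ListenableFuture<List<Integer>> all = Futures.allAsList(a, b);
        System.out.println(all.get()); // [1, 2]
        pool.shutdown();
    }
}
```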
getStrictModeIdentifierMessage
|
function getStrictModeIdentifierMessage(node: Node) {
// Provide specialized messages to help the user understand why we think they're in
// strict mode.
if (getContainingClass(node)) {
return Diagnostics.Identifier_expected_0_is_a_reserved_word_in_strict_mode_Class_definitions_are_automatically_in_strict_mode;
}
if (file.externalModuleIndicator) {
return Diagnostics.Identifier_expected_0_is_a_reserved_word_in_strict_mode_Modules_are_automatically_in_strict_mode;
}
return Diagnostics.Identifier_expected_0_is_a_reserved_word_in_strict_mode;
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
typescript
|
src/compiler/binder.ts
| 2,592
|
[
"node"
] | false
| 3
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
findDeclaredMethodWithMinimalParameters
|
public static @Nullable Method findDeclaredMethodWithMinimalParameters(Class<?> clazz, String methodName)
throws IllegalArgumentException {
Method targetMethod = findMethodWithMinimalParameters(clazz.getDeclaredMethods(), methodName);
if (targetMethod == null && clazz.getSuperclass() != null) {
targetMethod = findDeclaredMethodWithMinimalParameters(clazz.getSuperclass(), methodName);
}
return targetMethod;
}
|
Find a method with the given method name and minimal parameters (best case: none),
declared on the given class or one of its superclasses. Will return a public,
protected, package access, or private method.
<p>Checks {@code Class.getDeclaredMethods}, cascading upwards to all superclasses.
@param clazz the class to check
@param methodName the name of the method to find
@return the Method object, or {@code null} if not found
@throws IllegalArgumentException if methods of the given name were found but
could not be resolved to a unique method with minimal parameters
@see Class#getDeclaredMethods
|
java
|
spring-beans/src/main/java/org/springframework/beans/BeanUtils.java
| 382
|
[
"clazz",
"methodName"
] |
Method
| true
| 3
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
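A quick sketch of the lookup described above, using a JDK class purely for illustration:

```java
import java.lang.reflect.Method;
import java.util.ArrayList;
import org.springframework.beans.BeanUtils;

public class FindMethodExample {
    public static void main(String[] args) {
        // Resolves the declared "size" method with the fewest parameters,
        // cascading to superclasses if ArrayList did not declare it itself.
        Method size = BeanUtils.findDeclaredMethodWithMinimalParameters(ArrayList.class, "size");
        System.out.println(size);
    }
}
```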
format
|
public static String format(final Date date, final String pattern) {
return format(date, pattern, null, null);
}
|
Formats a date/time into a specific pattern.
@param date the date to format, not null.
@param pattern the pattern to use to format the date, not null.
@return the formatted date.
|
java
|
src/main/java/org/apache/commons/lang3/time/DateFormatUtils.java
| 266
|
[
"date",
"pattern"
] |
String
| true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
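A one-line sketch of the two-argument `format` overload above, which falls back to the default time zone and locale; the pattern is illustrative:

```java
import java.util.Date;
import org.apache.commons.lang3.time.DateFormatUtils;

public class DateFormatExample {
    public static void main(String[] args) {
        // Formats the current instant with the given pattern.
        String stamp = DateFormatUtils.format(new Date(), "yyyy-MM-dd HH:mm:ss");
        System.out.println(stamp);
    }
}
```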
toSend
|
@Override
public DefaultRecordsSend<Records> toSend() {
return new DefaultRecordsSend<>(this);
}
|
Get an iterator over the deep records.
@return An iterator over the records
|
java
|
clients/src/main/java/org/apache/kafka/common/record/AbstractRecords.java
| 68
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
lazyWeakLock
|
public static Striped<Lock> lazyWeakLock(int stripes) {
return lazyWeakCustom(stripes, () -> new ReentrantLock(false));
}
|
Creates a {@code Striped<Lock>} with lazily initialized, weakly referenced locks. Every lock is
reentrant.
@param stripes the minimum number of stripes (locks) required
@return a new {@code Striped<Lock>}
|
java
|
android/guava/src/com/google/common/util/concurrent/Striped.java
| 219
|
[
"stripes"
] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
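A sketch of per-key locking with `lazyWeakLock`, assuming Guava; the key is illustrative:

```java
import com.google.common.util.concurrent.Striped;
import java.util.concurrent.locks.Lock;

public class StripedLockExample {
    public static void main(String[] args) {
        // Locks are created lazily and weakly referenced, so unused stripes can be reclaimed.
        Striped<Lock> locks = Striped.lazyWeakLock(64);

        String key = "order-42"; // hypothetical key used only for illustration
        Lock lock = locks.get(key); // equal keys always map to the same stripe
        lock.lock();
        try {
            System.out.println("holding stripe for " + key);
        } finally {
            lock.unlock();
        }
    }
}
```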
h3ToChildrenSize
|
public static long h3ToChildrenSize(String h3Address, int childRes) {
return h3ToChildrenSize(stringToH3(h3Address), childRes);
}
|
h3ToChildrenSize returns the exact number of children for an H3 address at a
given child resolution.
@param h3Address H3 address to find the number of children of
@param childRes The child resolution you're interested in
@return int Exact number of children (handles hexagons and pentagons
correctly)
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/H3.java
| 475
|
[
"h3Address",
"childRes"
] | true
| 1
| 6.48
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
describeTopics
|
default DescribeTopicsResult describeTopics(TopicCollection topics) {
return describeTopics(topics, new DescribeTopicsOptions());
}
|
This is a convenience method for {@link #describeTopics(TopicCollection, DescribeTopicsOptions)}
with default options. See the overload for more details.
<p>
When using topic IDs, this operation is supported by brokers with version 3.1.0 or higher.
@param topics The topics to describe.
@return The DescribeTopicsResult.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 319
|
[
"topics"
] |
DescribeTopicsResult
| true
| 1
| 6.32
|
apache/kafka
| 31,560
|
javadoc
| false
|
compress_rows
|
def compress_rows(a):
"""
Suppress whole rows of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
`compress_rowcols` for details.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with
`mask` set to `nomask`. Must be a 2D array.
Returns
-------
compressed_array : ndarray
The compressed array.
See Also
--------
compress_rowcols
Examples
--------
>>> import numpy as np
>>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> np.ma.compress_rows(a)
array([[6, 7, 8]])
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_rows works for 2D arrays only.")
return compress_rowcols(a, 0)
|
Suppress whole rows of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
`compress_rowcols` for details.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with
`mask` set to `nomask`. Must be a 2D array.
Returns
-------
compressed_array : ndarray
The compressed array.
See Also
--------
compress_rowcols
Examples
--------
>>> import numpy as np
>>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> np.ma.compress_rows(a)
array([[6, 7, 8]])
|
python
|
numpy/ma/extras.py
| 955
|
[
"a"
] | false
| 2
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
unreflect
|
private static MethodHandle unreflect(final Method method) throws IllegalAccessException {
return MethodHandles.lookup().unreflect(requireMethod(method));
}
|
Throws NullPointerException if {@code method} is {@code null}.
@param method The method to test.
@return The given method.
@throws NullPointerException if {@code method} is {@code null}.
|
java
|
src/main/java/org/apache/commons/lang3/function/MethodInvokers.java
| 240
|
[
"method"
] |
MethodHandle
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
triton_version_hash
|
def triton_version_hash() -> str | None:
"""Get Triton version key if Triton is available.
Returns:
Triton version key if Triton is available, None otherwise.
"""
from torch._inductor.runtime.triton_compat import HAS_TRITON, triton_key
return triton_key() if HAS_TRITON else None
|
Get Triton version key if Triton is available.
Returns:
Triton version key if Triton is available, None otherwise.
|
python
|
torch/_inductor/runtime/caching/context.py
| 156
|
[] |
str | None
| true
| 2
| 7.76
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
_get_common_dtype
|
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
"""
Return the common dtype, if one exists.
Used in `find_common_type` implementation. This is for example used
to determine the resulting dtype in a concat operation.
If no common dtype exists, return None (which gives the other dtypes
the chance to determine a common dtype). If all dtypes in the list
return None, then the common dtype will be "object" dtype (this means
it is never needed to return "object" dtype from this method itself).
Parameters
----------
dtypes : list of dtypes
The dtypes for which to determine a common dtype. This is a list
of np.dtype or ExtensionDtype instances.
Returns
-------
Common dtype (np.dtype or ExtensionDtype) or None
"""
if len(set(dtypes)) == 1:
# only itself
return self
else:
return None
|
Return the common dtype, if one exists.
Used in `find_common_type` implementation. This is for example used
to determine the resulting dtype in a concat operation.
If no common dtype exists, return None (which gives the other dtypes
the chance to determine a common dtype). If all dtypes in the list
return None, then the common dtype will be "object" dtype (this means
it is never needed to return "object" dtype from this method itself).
Parameters
----------
dtypes : list of dtypes
The dtypes for which to determine a common dtype. This is a list
of np.dtype or ExtensionDtype instances.
Returns
-------
Common dtype (np.dtype or ExtensionDtype) or None
|
python
|
pandas/core/dtypes/base.py
| 371
|
[
"self",
"dtypes"
] |
DtypeObj | None
| true
| 3
| 7.04
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isLogConfigurationMessage
|
private boolean isLogConfigurationMessage(@Nullable Throwable ex) {
if (ex == null) {
return false;
}
if (ex instanceof InvocationTargetException) {
return isLogConfigurationMessage(ex.getCause());
}
String message = ex.getMessage();
if (message != null) {
for (String candidate : LOG_CONFIGURATION_MESSAGES) {
if (message.contains(candidate)) {
return true;
}
}
}
return false;
}
|
Check if the exception is a log configuration message, i.e. the log call might not
have actually output anything.
@param ex the source exception
@return {@code true} if the exception contains a log configuration message
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringBootExceptionHandler.java
| 90
|
[
"ex"
] | true
| 5
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
asRanges
|
@Override
public Set<Range<C>> asRanges() {
Set<Range<C>> result = asRanges;
return (result == null) ? asRanges = new AsRanges(rangesByLowerBound.values()) : result;
}
|
Returns a {@code TreeRangeSet} representing the union of the specified ranges.
<p>This is the smallest {@code RangeSet} which encloses each of the specified ranges. An
element will be contained in this {@code RangeSet} if and only if it is contained in at least
one {@code Range} in {@code ranges}.
@since 21.0
|
java
|
android/guava/src/com/google/common/collect/TreeRangeSet.java
| 84
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
isCXXOnlyStmt
|
static bool isCXXOnlyStmt(const Stmt *S) {
const StringRef Name = S->getStmtClassName();
if (Name.starts_with("CXX"))
return true;
// Check for all other class names in ExprCXX.h that have no 'CXX' prefix.
return isa<ArrayTypeTraitExpr, BuiltinBitCastExpr, CUDAKernelCallExpr,
CoawaitExpr, CoreturnStmt, CoroutineBodyStmt, CoroutineSuspendExpr,
CoyieldExpr, DependentCoawaitExpr, DependentScopeDeclRefExpr,
ExprWithCleanups, ExpressionTraitExpr, FunctionParmPackExpr,
LambdaExpr, MSDependentExistsStmt, MSPropertyRefExpr,
MSPropertySubscriptExpr, MaterializeTemporaryExpr, OverloadExpr,
PackExpansionExpr, SizeOfPackExpr, SubstNonTypeTemplateParmExpr,
SubstNonTypeTemplateParmPackExpr, TypeTraitExpr,
UserDefinedLiteral>(S);
}
|
and every other statement that is declared in file ExprCXX.h.
|
cpp
|
clang-tools-extra/clang-tidy/bugprone/SignalHandlerCheck.cpp
| 285
|
[] | true
| 2
| 6.88
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
maybeResetTimerAndRequestState
|
void maybeResetTimerAndRequestState() {
if (requestType == AcknowledgeRequestType.COMMIT_ASYNC) {
resetTimeout(timeoutMs);
reset();
}
}
|
Resets the timer with the configured timeout and resets the RequestState.
This is only applicable for commitAsync() requests as these states could be re-used.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 1,310
|
[] |
void
| true
| 2
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
handleListOffsetResponse
|
OffsetFetcherUtils.ListOffsetResult handleListOffsetResponse(ListOffsetsResponse listOffsetsResponse) {
Map<TopicPartition, OffsetFetcherUtils.ListOffsetData> fetchedOffsets = new HashMap<>();
Set<TopicPartition> partitionsToRetry = new HashSet<>();
Set<String> unauthorizedTopics = new HashSet<>();
for (ListOffsetsResponseData.ListOffsetsTopicResponse topic : listOffsetsResponse.topics()) {
for (ListOffsetsResponseData.ListOffsetsPartitionResponse partition : topic.partitions()) {
TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex());
Errors error = Errors.forCode(partition.errorCode());
switch (error) {
case NONE:
log.debug("Handling ListOffsetResponse response for {}. Fetched offset {}, timestamp {}",
topicPartition, partition.offset(), partition.timestamp());
if (partition.offset() != ListOffsetsResponse.UNKNOWN_OFFSET) {
Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH)
? Optional.empty()
: Optional.of(partition.leaderEpoch());
OffsetFetcherUtils.ListOffsetData offsetData = new OffsetFetcherUtils.ListOffsetData(partition.offset(), partition.timestamp(),
leaderEpoch);
fetchedOffsets.put(topicPartition, offsetData);
}
break;
case UNSUPPORTED_FOR_MESSAGE_FORMAT:
// The message format on the broker side is before 0.10.0, which means it does not
// support timestamps. We treat this case the same as if we weren't able to find an
// offset corresponding to the requested timestamp and leave it out of the result.
log.debug("Cannot search by timestamp for partition {} because the message format version " +
"is before 0.10.0", topicPartition);
break;
case NOT_LEADER_OR_FOLLOWER:
case REPLICA_NOT_AVAILABLE:
case KAFKA_STORAGE_ERROR:
case OFFSET_NOT_AVAILABLE:
case LEADER_NOT_AVAILABLE:
case FENCED_LEADER_EPOCH:
case UNKNOWN_LEADER_EPOCH:
log.debug("Attempt to fetch offsets for partition {} failed due to {}, retrying.",
topicPartition, error);
partitionsToRetry.add(topicPartition);
break;
case UNKNOWN_TOPIC_OR_PARTITION:
log.warn("Received unknown topic or partition error in ListOffset request for partition {}", topicPartition);
partitionsToRetry.add(topicPartition);
break;
case TOPIC_AUTHORIZATION_FAILED:
unauthorizedTopics.add(topicPartition.topic());
break;
default:
log.warn("Attempt to fetch offsets for partition {} failed due to unexpected exception: {}, retrying.",
topicPartition, error.message());
partitionsToRetry.add(topicPartition);
}
}
}
if (!unauthorizedTopics.isEmpty())
throw new TopicAuthorizationException(unauthorizedTopics);
else
return new OffsetFetcherUtils.ListOffsetResult(fetchedOffsets, partitionsToRetry);
}
|
Callback for the response of the list offset call.
@param listOffsetsResponse The response from the server.
@return {@link OffsetFetcherUtils.ListOffsetResult} extracted from the response, containing the fetched offsets
and partitions to retry.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherUtils.java
| 107
|
[
"listOffsetsResponse"
] | true
| 4
| 7.68
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
state_from_response
|
def state_from_response(response: dict[str, Any]) -> str:
"""
Get state from response dictionary.
:param response: response from AWS API
:return: current state of the cluster
"""
return response["Cluster"]["Status"]["State"]
|
Get state from response dictionary.
:param response: response from AWS API
:return: current state of the cluster
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/emr.py
| 491
|
[
"response"
] |
str
| true
| 1
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
basis
|
def basis(cls, deg, domain=None, window=None, symbol='x'):
"""Series basis polynomial of degree `deg`.
Returns the series representing the basis polynomial of degree `deg`.
Parameters
----------
deg : int
Degree of the basis polynomial for the series. Must be >= 0.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
symbol : str, optional
Symbol representing the independent variable. Default is 'x'.
Returns
-------
new_series : series
A series with the coefficient of the `deg` term set to one and
all others zero.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
ideg = int(deg)
if ideg != deg or ideg < 0:
raise ValueError("deg must be non-negative integer")
return cls([0] * ideg + [1], domain, window, symbol)
|
Series basis polynomial of degree `deg`.
Returns the series representing the basis polynomial of degree `deg`.
Parameters
----------
deg : int
Degree of the basis polynomial for the series. Must be >= 0.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
symbol : str, optional
Symbol representing the independent variable. Default is 'x'.
Returns
-------
new_series : series
A series with the coefficient of the `deg` term set to one and
all others zero.
|
python
|
numpy/polynomial/_polybase.py
| 1,115
|
[
"cls",
"deg",
"domain",
"window",
"symbol"
] | false
| 5
| 6.08
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
cast_scalar_indexer
|
def cast_scalar_indexer(val):
"""
Disallow indexing with a float key, even if that key is a round number.
Parameters
----------
val : scalar
Returns
-------
outval : scalar
"""
# assumes lib.is_scalar(val)
if lib.is_float(val) and val.is_integer():
raise IndexError(
# GH#34193
"Indexing with a float is no longer supported. Manually convert "
"to an integer key instead."
)
return val
|
Disallow indexing with a float key, even if that key is a round number.
Parameters
----------
val : scalar
Returns
-------
outval : scalar
|
python
|
pandas/core/common.py
| 159
|
[
"val"
] | false
| 3
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
toString
|
@Override
public String toString() {
if (ObjectUtils.isEmpty(this.messageExceptions)) {
return super.toString();
}
else {
StringBuilder sb = new StringBuilder(super.toString());
sb.append("; message exceptions (").append(this.messageExceptions.length).append(") are:");
for (int i = 0; i < this.messageExceptions.length; i++) {
Exception subEx = this.messageExceptions[i];
sb.append('\n').append("Failed message ").append(i + 1).append(": ");
sb.append(subEx);
}
return sb.toString();
}
}
|
Return an array with thrown message exceptions.
<p>Note that a general mail server connection failure will not result
in failed messages being returned here: A message will only be
contained here if actually sending it was attempted but failed.
@return the array of thrown message exceptions,
or an empty array if no failed messages
|
java
|
spring-context-support/src/main/java/org/springframework/mail/MailSendException.java
| 149
|
[] |
String
| true
| 3
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
shuffle
|
public static void shuffle(final boolean[] array, final Random random) {
for (int i = array.length; i > 1; i--) {
swap(array, i - 1, random.nextInt(i), 1);
}
}
|
Shuffles randomly the elements of the specified array using the <a href="https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle">Fisher-Yates shuffle
algorithm</a>.
@param array the array to shuffle.
@param random the source of randomness used to permute the elements.
@see <a href="https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle">Fisher-Yates shuffle algorithm</a>
@since 3.6
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 7,435
|
[
"array",
"random"
] |
void
| true
| 2
| 6.56
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
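A minimal sketch of the in-place shuffle documented above; the array contents and seed are arbitrary:

```java
import java.util.Arrays;
import java.util.Random;
import org.apache.commons.lang3.ArrayUtils;

public class ShuffleExample {
    public static void main(String[] args) {
        boolean[] flags = {true, true, false, false, true};
        // Fisher-Yates shuffle in place, driven by the supplied Random.
        ArrayUtils.shuffle(flags, new Random(12345L));
        System.out.println(Arrays.toString(flags));
    }
}
```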
getMessageSourceAccessor
|
protected final @Nullable MessageSourceAccessor getMessageSourceAccessor() throws IllegalStateException {
if (this.messageSourceAccessor == null && isContextRequired()) {
throw new IllegalStateException(
"ApplicationObjectSupport instance [" + this + "] does not run in an ApplicationContext");
}
return this.messageSourceAccessor;
}
|
Return a MessageSourceAccessor for the application context
used by this object, for easy message access.
@throws IllegalStateException if not running in an ApplicationContext
|
java
|
spring-context/src/main/java/org/springframework/context/support/ApplicationObjectSupport.java
| 166
|
[] |
MessageSourceAccessor
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
rotate
|
public static String rotate(final String str, final int shift) {
if (str == null) {
return null;
}
final int strLen = str.length();
if (shift == 0 || strLen == 0 || shift % strLen == 0) {
return str;
}
final StringBuilder builder = new StringBuilder(strLen);
final int offset = -(shift % strLen);
builder.append(substring(str, offset));
builder.append(substring(str, 0, offset));
return builder.toString();
}
|
Rotate (circular shift) a String of {@code shift} characters.
<ul>
<li>If {@code shift > 0}, right circular shift (ex : ABCDEF => FABCDE)</li>
<li>If {@code shift < 0}, left circular shift (ex : ABCDEF => BCDEFA)</li>
</ul>
<pre>
StringUtils.rotate(null, *) = null
StringUtils.rotate("", *) = ""
StringUtils.rotate("abcdefg", 0) = "abcdefg"
StringUtils.rotate("abcdefg", 2) = "fgabcde"
StringUtils.rotate("abcdefg", -2) = "cdefgab"
StringUtils.rotate("abcdefg", 7) = "abcdefg"
StringUtils.rotate("abcdefg", -7) = "abcdefg"
StringUtils.rotate("abcdefg", 9) = "fgabcde"
StringUtils.rotate("abcdefg", -9) = "cdefgab"
</pre>
@param str the String to rotate, may be null.
@param shift number of times to shift (positive : right shift, negative : left shift).
@return the rotated String, or the original String if {@code shift == 0}, or {@code null} if null String input.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 6,996
|
[
"str",
"shift"
] |
String
| true
| 5
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
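The javadoc examples in the record above can be exercised directly; a minimal sketch:

```java
import org.apache.commons.lang3.StringUtils;

public class RotateExample {
    public static void main(String[] args) {
        System.out.println(StringUtils.rotate("abcdefg", 2));  // fgabcde (right circular shift)
        System.out.println(StringUtils.rotate("abcdefg", -2)); // cdefgab (left circular shift)
        System.out.println(StringUtils.rotate("abcdefg", 9));  // fgabcde (9 % 7 == 2)
        System.out.println(StringUtils.rotate(null, 3));       // null
    }
}
```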
defaultCloseCallback
|
function defaultCloseCallback(err) {
if (err != null) throw err;
}
|
Synchronously reads the entire contents of a file.
@param {string | Buffer | URL | number} path
@param {{
encoding?: string | null;
flag?: string;
}} [options]
@returns {string | Buffer}
|
javascript
|
lib/fs.js
| 488
|
[
"err"
] | false
| 2
| 6
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
equals
|
@Override
public boolean equals(final Object obj) {
if (obj instanceof MutableLong) {
return value == ((MutableLong) obj).longValue();
}
return false;
}
|
Compares this object to the specified object. The result is {@code true} if and only if the argument
is not {@code null} and is a {@link MutableLong} object that contains the same {@code long}
value as this object.
@param obj the object to compare with, null returns false.
@return {@code true} if the objects are the same; {@code false} otherwise.
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableLong.java
| 180
|
[
"obj"
] | true
| 2
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
diag
|
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
Examples
--------
>>> import numpy as np
Create an array with negative values masked:
>>> import numpy as np
>>> x = np.array([[11.2, -3.973, 18], [0.801, -1.41, 12], [7, 33, -12]])
>>> masked_x = np.ma.masked_array(x, mask=x < 0)
>>> masked_x
masked_array(
data=[[11.2, --, 18.0],
[0.801, --, 12.0],
[7.0, 33.0, --]],
mask=[[False, True, False],
[False, True, False],
[False, False, True]],
fill_value=1e+20)
Isolate the main diagonal from the masked array:
>>> np.ma.diag(masked_x)
masked_array(data=[11.2, --, --],
mask=[False, True, True],
fill_value=1e+20)
Isolate the first diagonal below the main diagonal:
>>> np.ma.diag(masked_x, -1)
masked_array(data=[0.801, 33.0],
mask=[False, False],
fill_value=1e+20)
"""
output = np.diag(v, k).view(MaskedArray)
if getmask(v) is not nomask:
output._mask = np.diag(v._mask, k)
return output
|
Extract a diagonal or construct a diagonal array.
This function is the equivalent of `numpy.diag` that takes masked
values into account, see `numpy.diag` for details.
See Also
--------
numpy.diag : Equivalent function for ndarrays.
Examples
--------
>>> import numpy as np
Create an array with negative values masked:
>>> import numpy as np
>>> x = np.array([[11.2, -3.973, 18], [0.801, -1.41, 12], [7, 33, -12]])
>>> masked_x = np.ma.masked_array(x, mask=x < 0)
>>> masked_x
masked_array(
data=[[11.2, --, 18.0],
[0.801, --, 12.0],
[7.0, 33.0, --]],
mask=[[False, True, False],
[False, True, False],
[False, False, True]],
fill_value=1e+20)
Isolate the main diagonal from the masked array:
>>> np.ma.diag(masked_x)
masked_array(data=[11.2, --, --],
mask=[False, True, True],
fill_value=1e+20)
Isolate the first diagonal below the main diagonal:
>>> np.ma.diag(masked_x, -1)
masked_array(data=[0.801, 33.0],
mask=[False, False],
fill_value=1e+20)
|
python
|
numpy/ma/core.py
| 7,358
|
[
"v",
"k"
] | false
| 2
| 6
|
numpy/numpy
| 31,054
|
unknown
| false
|
|
parameterizeWithOwner
|
public static final ParameterizedType parameterizeWithOwner(final Type owner, final Class<?> rawClass, final Type... typeArguments) {
Objects.requireNonNull(rawClass, "rawClass");
final Type useOwner;
if (rawClass.getEnclosingClass() == null) {
Validate.isTrue(owner == null, "no owner allowed for top-level %s", rawClass);
useOwner = null;
} else if (owner == null) {
useOwner = rawClass.getEnclosingClass();
} else {
Validate.isTrue(isAssignable(owner, rawClass.getEnclosingClass()), "%s is invalid owner type for parameterized %s", owner, rawClass);
useOwner = owner;
}
Validate.noNullElements(typeArguments, "null type argument at index %s");
Validate.isTrue(rawClass.getTypeParameters().length == typeArguments.length, "invalid number of type parameters specified: expected %d, got %d",
rawClass.getTypeParameters().length, typeArguments.length);
return new ParameterizedTypeImpl(rawClass, useOwner, typeArguments);
}
|
Creates a parameterized type instance.
@param owner the owning type.
@param rawClass the raw class to create a parameterized type instance for.
@param typeArguments the types used for parameterization.
@return {@link ParameterizedType}.
@throws NullPointerException if {@code rawClass} is {@code null}.
@since 3.2
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 1,462
|
[
"owner",
"rawClass"
] |
ParameterizedType
| true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
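A sketch of building a parameterized nested type; passing a null owner lets the enclosing class be used, as the javadoc and code above describe:

```java
import java.lang.reflect.ParameterizedType;
import java.util.Map;
import org.apache.commons.lang3.reflect.TypeUtils;

public class ParameterizeExample {
    public static void main(String[] args) {
        // Map.Entry is a nested type; a null owner defaults to its enclosing class (Map).
        ParameterizedType entryType =
                TypeUtils.parameterizeWithOwner(null, Map.Entry.class, String.class, Integer.class);
        // Prints a representation such as java.util.Map.Entry<java.lang.String, java.lang.Integer>
        System.out.println(entryType);
    }
}
```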
createEvaluationContext
|
public EvaluationContext createEvaluationContext(Collection<? extends Cache> caches,
Method method, @Nullable Object[] args, Object target, Class<?> targetClass, Method targetMethod,
@Nullable Object result) {
CacheExpressionRootObject rootObject = new CacheExpressionRootObject(
caches, method, args, target, targetClass);
CacheEvaluationContext evaluationContext = this.evaluationContextFactory
.forOperation(rootObject, targetMethod, args);
if (result == RESULT_UNAVAILABLE) {
evaluationContext.addUnavailableVariable(RESULT_VARIABLE);
}
else if (result != NO_RESULT) {
evaluationContext.setVariable(RESULT_VARIABLE, result);
}
return evaluationContext;
}
|
Create an {@link EvaluationContext}.
@param caches the current caches
@param method the method
@param args the method arguments
@param target the target object
@param targetClass the target class
@param result the return value (can be {@code null}) or
{@link #NO_RESULT} if there is no return at this time
@return the evaluation context
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheOperationExpressionEvaluator.java
| 88
|
[
"caches",
"method",
"args",
"target",
"targetClass",
"targetMethod",
"result"
] |
EvaluationContext
| true
| 3
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
generationIfStable
|
protected synchronized Generation generationIfStable() {
if (this.state != MemberState.STABLE)
return null;
return generation;
}
|
Get the current generation state if the group is stable, otherwise return null
@return the current generation or null
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
| 1,047
|
[] |
Generation
| true
| 2
| 7.6
|
apache/kafka
| 31,560
|
javadoc
| false
|
generate_regular_range
|
def generate_regular_range(
start: Timestamp | Timedelta | None,
end: Timestamp | Timedelta | None,
periods: int | None,
freq: BaseOffset,
unit: TimeUnit = "ns",
) -> npt.NDArray[np.intp]:
"""
Generate a range of dates or timestamps with the spans between dates
described by the given `freq` DateOffset.
Parameters
----------
start : Timedelta, Timestamp or None
First point of produced date range.
end : Timedelta, Timestamp or None
Last point of produced date range.
periods : int or None
Number of periods in produced date range.
freq : Tick
Describes space between dates in produced date range.
unit : {'s', 'ms', 'us', 'ns'}, default "ns"
The resolution the output is meant to represent.
Returns
-------
ndarray[np.int64]
Representing the given resolution.
"""
istart = start._value if start is not None else None
iend = end._value if end is not None else None
if isinstance(freq, Day):
# In contexts without a timezone, a Day offset is unambiguously
# interpretable as Timedelta-like.
td = Timedelta(days=freq.n)
else:
freq.nanos # raises if non-fixed frequency
td = Timedelta(freq)
b: int
e: int
try:
td = td.as_unit(unit, round_ok=False)
except ValueError as err:
raise ValueError(
f"freq={freq} is incompatible with unit={unit}. "
"Use a lower freq or a higher unit instead."
) from err
stride = int(td._value)
if periods is None and istart is not None and iend is not None:
b = istart
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
e = b + (iend - b) // stride * stride + stride // 2 + 1
elif istart is not None and periods is not None:
b = istart
e = _generate_range_overflow_safe(b, periods, stride, side="start")
elif iend is not None and periods is not None:
e = iend + stride
b = _generate_range_overflow_safe(e, periods, stride, side="end")
else:
raise ValueError(
"at least 'start' or 'end' should be specified if a 'period' is given."
)
return range_to_ndarray(range(b, e, stride))
|
Generate a range of dates or timestamps with the spans between dates
described by the given `freq` DateOffset.
Parameters
----------
start : Timedelta, Timestamp or None
First point of produced date range.
end : Timedelta, Timestamp or None
Last point of produced date range.
periods : int or None
Number of periods in produced date range.
freq : Tick
Describes space between dates in produced date range.
unit : {'s', 'ms', 'us', 'ns'}, default "ns"
The resolution the output is meant to represent.
Returns
-------
ndarray[np.int64]
Representing the given resolution.
|
python
|
pandas/core/arrays/_ranges.py
| 31
|
[
"start",
"end",
"periods",
"freq",
"unit"
] |
npt.NDArray[np.intp]
| true
| 13
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
open
|
def open(side_effect=None):
"""Patch builtins.open so that it returns StringIO object.
:param side_effect: Additional side effect for when the open context
is entered.
Example::
>>> with mock.open(io.BytesIO) as open_fh:
... something_opening_and_writing_bytes_to_a_file()
... self.assertIn(b'foo', open_fh.getvalue())
"""
with patch('builtins.open') as open_:
with _mock_context(open_) as context:
if side_effect is not None:
context.__enter__.side_effect = side_effect
val = context.__enter__.return_value = WhateverIO()
val.__exit__ = Mock()
yield val
|
Patch builtins.open so that it returns StringIO object.
:param side_effect: Additional side effect for when the open context
is entered.
Example::
>>> with mock.open(io.BytesIO) as open_fh:
... something_opening_and_writing_bytes_to_a_file()
... self.assertIn(b'foo', open_fh.getvalue())
|
python
|
t/unit/conftest.py
| 718
|
[
"side_effect"
] | false
| 2
| 7.04
|
celery/celery
| 27,741
|
sphinx
| false
|
|
refreshAfterWrite
|
@GwtIncompatible // To be supported (synchronously).
@Deprecated // GoodTime
@CanIgnoreReturnValue
public CacheBuilder<K, V> refreshAfterWrite(long duration, TimeUnit unit) {
checkNotNull(unit);
checkState(refreshNanos == UNSET_INT, "refresh was already set to %s ns", refreshNanos);
checkArgument(duration > 0, "duration must be positive: %s %s", duration, unit);
this.refreshNanos = unit.toNanos(duration);
return this;
}
|
Specifies that active entries are eligible for automatic refresh once a fixed duration has
elapsed after the entry's creation, or the most recent replacement of its value. The semantics
of refreshes are specified in {@link LoadingCache#refresh}, and are performed by calling {@link
CacheLoader#reload}.
<p>As the default implementation of {@link CacheLoader#reload} is synchronous, it is
recommended that users of this method override {@link CacheLoader#reload} with an asynchronous
implementation; otherwise refreshes will be performed during unrelated cache read and write
operations.
<p>Currently automatic refreshes are performed when the first stale request for an entry
occurs. The request triggering refresh will make a synchronous call to {@link
CacheLoader#reload}
and immediately return the new value if the returned future is complete, and the old value
otherwise.
<p><b>Note:</b> <i>all exceptions thrown during refresh will be logged and then swallowed</i>.
<p>If you can represent the duration as a {@link Duration} (which should be preferred when
feasible), use {@link #refreshAfterWrite(Duration)} instead.
@param duration the length of time after an entry is created that it should be considered
stale, and thus eligible for refresh
@param unit the unit that {@code duration} is expressed in
@return this {@code CacheBuilder} instance (for chaining)
@throws IllegalArgumentException if {@code duration} is negative
@throws IllegalStateException if {@link #refreshAfterWrite} was already set
@since 11.0
@deprecated Use {@link #refreshAfterWrite(Duration)} instead.
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 923
|
[
"duration",
"unit"
] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
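A sketch of a loading cache using refresh-after-write, assuming Guava; the loader is illustrative (the javadoc above recommends an asynchronous reload override in real use):

```java
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.concurrent.TimeUnit;

public class RefreshExample {
    public static void main(String[] args) throws Exception {
        LoadingCache<String, Long> cache = CacheBuilder.newBuilder()
                // Entries become eligible for refresh five minutes after they are written.
                .refreshAfterWrite(5, TimeUnit.MINUTES)
                .build(new CacheLoader<String, Long>() {
                    @Override
                    public Long load(String key) {
                        // Hypothetical loader: compute the value for the key.
                        return (long) key.length();
                    }
                });
        System.out.println(cache.get("alpha")); // 5
    }
}
```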
mod
|
def mod(a, values):
"""
Return (a % i), that is pre-Python 2.6 string formatting
(interpolation), element-wise for a pair of array_likes of str
or unicode.
Parameters
----------
a : array_like, with `np.bytes_` or `np.str_` dtype
values : array_like of values
These values will be element-wise interpolated into the string.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
Examples
--------
>>> import numpy as np
>>> a = np.array(["NumPy is a %s library"])
>>> np.strings.mod(a, values=["Python"])
array(['NumPy is a Python library'], dtype='<U25')
>>> a = np.array([b'%d bytes', b'%d bits'])
>>> values = np.array([8, 64])
>>> np.strings.mod(a, values)
array([b'8 bytes', b'64 bits'], dtype='|S7')
"""
return _to_bytes_or_str_array(
_vec_string(a, np.object_, '__mod__', (values,)), a)
|
Return (a % i), that is pre-Python 2.6 string formatting
(interpolation), element-wise for a pair of array_likes of str
or unicode.
Parameters
----------
a : array_like, with `np.bytes_` or `np.str_` dtype
values : array_like of values
These values will be element-wise interpolated into the string.
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
Examples
--------
>>> import numpy as np
>>> a = np.array(["NumPy is a %s library"])
>>> np.strings.mod(a, values=["Python"])
array(['NumPy is a Python library'], dtype='<U25')
>>> a = np.array([b'%d bytes', b'%d bits'])
>>> values = np.array([8, 64])
>>> np.strings.mod(a, values)
array([b'8 bytes', b'64 bits'], dtype='|S7')
|
language: python | file: numpy/_core/strings.py | line: 219 | params: ["a", "values"] | return: (not specified) | type_hints: false | complexity: 1 | quality: 6 | repo: numpy/numpy (31,054 stars) | docstring: numpy | async: false
|
|
validBytes
|
public int validBytes() {
if (validBytes >= 0)
return validBytes;
int bytes = 0;
for (RecordBatch batch : batches())
bytes += batch.sizeInBytes();
this.validBytes = bytes;
return bytes;
}
|
The total number of bytes in this message set not including any partial, trailing messages. This
may be smaller than what is returned by {@link #sizeInBytes()}.
@return The number of valid bytes
|
language: java | file: clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java | line: 97 | params: [] | return: (not specified) | type_hints: true | complexity: 2 | quality: 7.92 | repo: apache/kafka (31,560 stars) | docstring: javadoc | async: false
|
|
equals
|
@Override
public boolean equals(Object o) {
if (this == o)
return true;
else if (!(o instanceof ProducerRecord))
return false;
ProducerRecord<?, ?> that = (ProducerRecord<?, ?>) o;
return Objects.equals(key, that.key) &&
Objects.equals(partition, that.partition) &&
Objects.equals(topic, that.topic) &&
Objects.equals(headers, that.headers) &&
Objects.equals(value, that.value) &&
Objects.equals(timestamp, that.timestamp);
}
|
@return The partition to which the record will be sent (or null if no partition was specified)
|
language: java | file: clients/src/main/java/org/apache/kafka/clients/producer/ProducerRecord.java | line: 198 | params: ["o"] | return: (not specified) | type_hints: true | complexity: 8 | quality: 6.56 | repo: apache/kafka (31,560 stars) | docstring: javadoc | async: false
|
|
retainIncluded
|
public void retainIncluded(Map<String, @Nullable Object> map) {
for (Include candidate : Include.values()) {
if (!this.includes.contains(candidate)) {
map.remove(candidate.key);
}
}
}
|
Remove elements from the given map if they are not included in this set of options.
@param map the map to update
@since 3.2.7
|
language: java | file: core/spring-boot/src/main/java/org/springframework/boot/web/error/ErrorAttributeOptions.java | line: 90 | params: ["map"] | return: void | type_hints: true | complexity: 2 | quality: 7.04 | repo: spring-projects/spring-boot (79,428 stars) | docstring: javadoc | async: false
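For illustration, a small hypothetical sketch of how this method prunes an error-attribute map; the map contents and the chosen Include are invented:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.springframework.boot.web.error.ErrorAttributeOptions;
    import org.springframework.boot.web.error.ErrorAttributeOptions.Include;

    class RetainIncludedSketch {
        public static void main(String[] args) {
            Map<String, Object> attributes = new LinkedHashMap<>();
            attributes.put("message", "Validation failed");
            attributes.put("exception", "java.lang.IllegalArgumentException");
            attributes.put("status", 400);

            ErrorAttributeOptions options = ErrorAttributeOptions.of(Include.MESSAGE);
            options.retainIncluded(attributes);
            // "exception" is removed because Include.EXCEPTION is not in the options;
            // "message" is kept; "status" is untouched (it is not an Include key).
            System.out.println(attributes); // {message=Validation failed, status=400}
        }
    }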
|
_set_value
|
def _set_value(self, label, value, takeable: bool = False) -> None:
"""
Quickly set single value at passed label.
If label is not contained, a new object is created with the label
placed at the end of the result index.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed.
value : object
Scalar value.
takeable : interpret the index as indexers, default False
"""
if not takeable:
try:
loc = self.index.get_loc(label)
except KeyError:
# set using a non-recursive method
self.loc[label] = value
return
else:
loc = label
self._set_values(loc, value)
|
Quickly set single value at passed label.
If label is not contained, a new object is created with the label
placed at the end of the result index.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed.
value : object
Scalar value.
takeable : interpret the index as indexers, default False
|
language: python | file: pandas/core/series.py | line: 1,174 | params: ["self", "label", "value", "takeable"] | return: None | type_hints: true | complexity: 3 | quality: 6.88 | repo: pandas-dev/pandas (47,362 stars) | docstring: numpy | async: false
|
removeFrom
|
static String removeFrom(String value) {
Matcher matcher = PATTERN.matcher(value);
return (matcher.matches()) ? matcher.group(1) : value;
}
|
Remove any hint from the given value.
@param value the source value
@return the value without any hint
|
language: java | file: core/spring-boot/src/main/java/org/springframework/boot/context/config/FileExtensionHint.java | line: 79 | params: ["value"] | return: String | type_hints: true | complexity: 2 | quality: 8.16 | repo: spring-projects/spring-boot (79,428 stars) | docstring: javadoc | async: false
|
removeIf
|
@CanIgnoreReturnValue
public static <T extends @Nullable Object> boolean removeIf(
Iterable<T> removeFrom, Predicate<? super T> predicate) {
if (removeFrom instanceof RandomAccess && removeFrom instanceof List) {
return removeIfFromRandomAccessList((List<T>) removeFrom, checkNotNull(predicate));
}
return Iterators.removeIf(removeFrom.iterator(), predicate);
}
|
Removes, from an iterable, every element that satisfies the provided predicate.
<p>Removals may or may not happen immediately as each element is tested against the predicate.
The behavior of this method is not specified if {@code predicate} is dependent on {@code
removeFrom}.
<p><b>Java 8+ users:</b> if {@code removeFrom} is a {@link Collection}, use {@code
removeFrom.removeIf(predicate)} instead.
@param removeFrom the iterable to (potentially) remove elements from
@param predicate a predicate that determines whether an element should be removed
@return {@code true} if any elements were removed from the iterable
@throws UnsupportedOperationException if the iterable does not support {@code remove()}.
@since 2.0
|
language: java | file: android/guava/src/com/google/common/collect/Iterables.java | line: 187 | params: ["removeFrom", "predicate"] | return: (not specified) | type_hints: true | complexity: 3 | quality: 7.44 | repo: google/guava (51,352 stars) | docstring: javadoc | async: false
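For illustration, a minimal sketch of this helper applied to a plain ArrayList; the sample data is invented:

    import com.google.common.collect.Iterables;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    class RemoveIfSketch {
        public static void main(String[] args) {
            List<String> names = new ArrayList<>(Arrays.asList("alice", "Bob", "carol"));
            // Remove every element whose first letter is upper case.
            boolean changed = Iterables.removeIf(names, s -> Character.isUpperCase(s.charAt(0)));
            System.out.println(names + " changed=" + changed); // [alice, carol] changed=true
        }
    }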
|
|
isAllLowerCase
|
public static boolean isAllLowerCase(final CharSequence cs) {
if (isEmpty(cs)) {
return false;
}
final int sz = cs.length();
for (int i = 0; i < sz; i++) {
if (!Character.isLowerCase(cs.charAt(i))) {
return false;
}
}
return true;
}
|
Tests if the CharSequence contains only lowercase characters.
<p>
{@code null} will return {@code false}. An empty CharSequence (length()=0) will return {@code false}.
</p>
<pre>
StringUtils.isAllLowerCase(null) = false
StringUtils.isAllLowerCase("") = false
StringUtils.isAllLowerCase(" ") = false
StringUtils.isAllLowerCase("abc") = true
StringUtils.isAllLowerCase("abC") = false
StringUtils.isAllLowerCase("ab c") = false
StringUtils.isAllLowerCase("ab1c") = false
StringUtils.isAllLowerCase("ab/c") = false
</pre>
@param cs the CharSequence to check, may be null.
@return {@code true} if only contains lowercase characters, and is non-null.
@since 2.5
@since 3.0 Changed signature from isAllLowerCase(String) to isAllLowerCase(CharSequence)
|
language: java | file: src/main/java/org/apache/commons/lang3/StringUtils.java | line: 3,195 | params: ["cs"] | return: (not specified) | type_hints: true | complexity: 4 | quality: 7.44 | repo: apache/commons-lang (2,896 stars) | docstring: javadoc | async: false
|
|
onFailure
|
private void onFailure(final Throwable exception, final long responseTimeMs) {
heartbeatRequestState.onFailedAttempt(responseTimeMs);
heartbeatState.reset();
if (exception instanceof RetriableException) {
coordinatorRequestManager.handleCoordinatorDisconnect(exception, responseTimeMs);
String message = String.format("StreamsGroupHeartbeatRequest failed because of a retriable exception. Will retry in %s ms: %s",
heartbeatRequestState.remainingBackoffMs(responseTimeMs),
exception.getMessage());
logger.debug(message);
membershipManager.onRetriableHeartbeatFailure();
} else {
if (exception instanceof UnsupportedVersionException) {
logger.error("StreamsGroupHeartbeatRequest failed because of an unsupported version exception: {}",
exception.getMessage());
handleFatalFailure(new UnsupportedVersionException(UNSUPPORTED_VERSION_ERROR_MESSAGE));
} else {
logger.error("StreamsGroupHeartbeatRequest failed because of a fatal exception while sending request: {}",
exception.getMessage());
handleFatalFailure(exception);
}
membershipManager.onFatalHeartbeatFailure();
}
}
|
A heartbeat should be sent without waiting for the heartbeat interval to expire if:
- the member is leaving the group
or
- the member is joining the group or acknowledging the assignment and for both cases there is no heartbeat request
in flight.
@return true if a heartbeat should be sent before the interval expires, false otherwise
|
language: java | file: clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsGroupHeartbeatRequestManager.java | line: 663 | params: ["exception", "responseTimeMs"] | return: void | type_hints: true | complexity: 3 | quality: 6.56 | repo: apache/kafka (31,560 stars) | docstring: javadoc | async: false
|
close
|
public void close(final Runnable onInitialClose, final Runnable onSubsequentClose) {
if (isClosed.compareAndSet(false, true)) {
if (onInitialClose != null)
onInitialClose.run();
} else {
if (onSubsequentClose != null)
onSubsequentClose.run();
}
}
|
Closes the resource in a thread-safe manner.
<p/>
After the execution has completed, calls to {@link #isClosed()} will return {@code true} and calls to
{@link #assertOpen(String)} and {@link #assertOpen(Supplier)}
will throw an {@link IllegalStateException}.
@param onInitialClose Optional {@link Runnable} to execute when the resource is closed. Note that the
object will still be considered closed even if an exception is thrown during the course
of its execution; can be {@code null}
@param onSubsequentClose Optional {@link Runnable} to execute if this resource was previously closed. Note that
no state will be affected if an exception is thrown during its execution; can be
{@code null}
|
language: java | file: clients/src/main/java/org/apache/kafka/common/internals/IdempotentCloser.java | line: 158 | params: ["onInitialClose", "onSubsequentClose"] | return: void | type_hints: true | complexity: 4 | quality: 6.72 | repo: apache/kafka (31,560 stars) | docstring: javadoc | async: false
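For illustration, a minimal sketch of the two-callback close pattern described above; the no-argument constructor and the printed messages are assumptions:

    import org.apache.kafka.common.internals.IdempotentCloser;

    class IdempotentCloserSketch {
        public static void main(String[] args) {
            IdempotentCloser closer = new IdempotentCloser();
            closer.close(
                    () -> System.out.println("releasing resources"),   // runs only on the first close
                    () -> System.out.println("close() called again")); // runs on every later close
            closer.close(null, () -> System.out.println("close() called again")); // prints the second message
        }
    }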
|
validate_na_arg
|
def validate_na_arg(value, name: str):
"""
Validate na arguments.
Parameters
----------
value : object
Value to validate.
name : str
Name of the argument, used to raise an informative error message.
Raises
------
ValueError
When ``value`` is determined to be invalid.
"""
if (
value is lib.no_default
or isinstance(value, bool)
or value is None
or value is NA
or (lib.is_float(value) and np.isnan(value))
):
return
raise ValueError(f"{name} must be None, pd.NA, np.nan, True, or False; got {value}")
|
Validate na arguments.
Parameters
----------
value : object
Value to validate.
name : str
Name of the argument, used to raise an informative error message.
Raises
------
ValueError
When ``value`` is determined to be invalid.
|
language: python | file: pandas/util/_validators.py | line: 273 | params: ["value", "name"] | return: (not specified) | type_hints: true | complexity: 7 | quality: 6.72 | repo: pandas-dev/pandas (47,362 stars) | docstring: numpy | async: false
|
|
disableWakeups
|
public void disableWakeups() {
pendingTask.set(new DisabledWakeups());
}
|
If there is no pending task, set the pending task active.
If wakeup was called before setting an active task, the current task will complete exceptionally with
WakeupException right away.
If there is an active task, throw exception.
@param currentTask
@param <T>
@return
|
language: java | file: clients/src/main/java/org/apache/kafka/clients/consumer/internals/WakeupTrigger.java | line: 130 | params: [] | return: void | type_hints: true | complexity: 1 | quality: 6.8 | repo: apache/kafka (31,560 stars) | docstring: javadoc | async: false
|
checkStrictModeLabeledStatement
|
function checkStrictModeLabeledStatement(node: LabeledStatement) {
// Grammar checking for labeledStatement
if (inStrictMode && getEmitScriptTarget(options) >= ScriptTarget.ES2015) {
if (isDeclarationStatement(node.statement) || isVariableStatement(node.statement)) {
errorOnFirstToken(node.label, Diagnostics.A_label_is_not_allowed_here);
}
}
}
|
Declares a Symbol for the node and adds it to symbols. Reports errors for conflicting identifier names.
@param symbolTable - The symbol table which node will be added to.
@param parent - node's parent declaration.
@param node - The declaration to be added to the symbol table
@param includes - The SymbolFlags that node has in addition to its declaration type (eg: export, ambient, etc.)
@param excludes - The flags which node cannot be declared alongside in a symbol table. Used to report forbidden declarations.
|
language: typescript | file: src/compiler/binder.ts | line: 2,736 | params: ["node"] | return: (not specified) | type_hints: false | complexity: 5 | quality: 6.08 | repo: microsoft/TypeScript (107,154 stars) | docstring: jsdoc | async: false
|
|
decode
|
public static String decode(String source, Charset charset) {
int length = source.length();
int firstPercentIndex = source.indexOf('%');
if (length == 0 || firstPercentIndex < 0) {
return source;
}
StringBuilder output = new StringBuilder(length);
output.append(source, 0, firstPercentIndex);
byte[] bytes = null;
int i = firstPercentIndex;
while (i < length) {
char ch = source.charAt(i);
if (ch == '%') {
try {
if (bytes == null) {
bytes = new byte[(length - i) / 3];
}
int pos = 0;
while (i + 2 < length && ch == '%') {
bytes[pos++] = (byte) HexFormat.fromHexDigits(source, i + 1, i + 3);
i += 3;
if (i < length) {
ch = source.charAt(i);
}
}
if (i < length && ch == '%') {
throw new IllegalArgumentException("Incomplete trailing escape (%) pattern");
}
output.append(new String(bytes, 0, pos, charset));
}
catch (NumberFormatException ex) {
throw new IllegalArgumentException("Invalid encoded sequence \"" + source.substring(i) + "\"");
}
}
else {
output.append(ch);
i++;
}
}
return output.toString();
}
|
Decode the given encoded URI component value by replacing each "<i>{@code %xy}</i>"
sequence with the character represented by that hexadecimal value in the specified
character encoding, leaving other characters unmodified.
@param source the encoded URI component value
@param charset the character encoding to use to decode the "<i>{@code %xy}</i>"
sequences
@return the decoded value
@since 4.0.0
|
language: java | file: loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/util/UrlDecoder.java | line: 57 | params: ["source", "charset"] | return: String | type_hints: true | complexity: 12 | quality: 7.76 | repo: spring-projects/spring-boot (79,428 stars) | docstring: javadoc | async: false
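For illustration, a minimal sketch of a call to this decoder; the input string is invented and the class is assumed to be available from Spring Boot's loader module:

    import java.nio.charset.StandardCharsets;
    import org.springframework.boot.loader.net.util.UrlDecoder;

    class UrlDecoderSketch {
        public static void main(String[] args) {
            // %20 decodes to a space, %21 to '!'.
            String decoded = UrlDecoder.decode("hello%20world%21", StandardCharsets.UTF_8);
            System.out.println(decoded); // hello world!
        }
    }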
|
isNameOfDeclarationWithCollidingName
|
function isNameOfDeclarationWithCollidingName(node: Identifier) {
switch (node.parent.kind) {
case SyntaxKind.BindingElement:
case SyntaxKind.ClassDeclaration:
case SyntaxKind.EnumDeclaration:
case SyntaxKind.VariableDeclaration:
return (node.parent as NamedDeclaration).name === node
&& resolver.isDeclarationWithCollidingName(node.parent as Declaration);
}
return false;
}
|
Determines whether a name is the name of a declaration with a colliding name.
NOTE: This function expects to be called with an original source tree node.
@param node An original source tree node.
|
language: typescript | file: src/compiler/transformers/es2015.ts | line: 4,942 | params: ["node"] | return: (not specified) | type_hints: false | complexity: 2 | quality: 6.08 | repo: microsoft/TypeScript (107,154 stars) | docstring: jsdoc | async: false
|
|
get_memory_usage
|
def get_memory_usage():
"""
Get current memory usage in MB. This includes all child processes.
Returns:
Total memory usage in MB
"""
process = psutil.Process()
main_memory = process.memory_full_info().pss
# Add memory usage of all child processes
for child in process.children(recursive=True):
try:
child_mem = child.memory_full_info().pss
main_memory += child_mem
except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError):
# Process might have terminated or doesn't support PSS, fall back to USS
print(f"Failed to get PSS for {child}, falling back to USS")
child_mem = child.memory_info().uss
main_memory += child_mem
return main_memory / (1024 * 1024)
|
Get current memory usage in MB. This includes all child processes.
Returns:
Total memory usage in MB
|
language: python | file: benchmarks/data/dataloader_benchmark.py | line: 31 | params: [] | return: (not specified) | type_hints: false | complexity: 2 | quality: 7.12 | repo: pytorch/pytorch (96,034 stars) | docstring: unknown | async: false
|
|
toTopicNamePartitionSet
|
protected SortedSet<TopicPartition> toTopicNamePartitionSet() {
SortedSet<TopicPartition> result = new TreeSet<>(TOPIC_PARTITION_COMPARATOR);
topicIdPartitions.forEach(topicIdPartition -> result.add(topicIdPartition.topicPartition()));
return result;
}
|
@return Set of topic partitions (with topic name and partition number)
|
language: java | file: clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicIdPartitionSet.java | line: 113 | params: [] | return: (not specified) | type_hints: true | complexity: 1 | quality: 6.4 | repo: apache/kafka (31,560 stars) | docstring: javadoc | async: false
|
|
max
|
def max(
self,
numeric_only: bool = False,
min_count: int = 0,
):
"""
Compute max value of group.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computes the maximum value in the given Series or Dataframe.
See Also
--------
core.resample.Resampler.min : Compute min value of group.
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
core.resample.Resampler.median : Compute median of groups, excluding missing
values.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.resample("MS").max()
2023-01-01 2
2023-02-01 4
Freq: MS, dtype: int64
"""
return self._downsample("max", numeric_only=numeric_only, min_count=min_count)
|
Compute max value of group.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
Returns
-------
Series or DataFrame
Computes the maximum value in the given Series or Dataframe.
See Also
--------
core.resample.Resampler.min : Compute min value of group.
core.resample.Resampler.mean : Compute mean of groups, excluding missing values.
core.resample.Resampler.median : Compute median of groups, excluding missing
values.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-15", "2023-02-01", "2023-02-15"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-15 2
2023-02-01 3
2023-02-15 4
dtype: int64
>>> ser.resample("MS").max()
2023-01-01 2
2023-02-01 4
Freq: MS, dtype: int64
|
language: python | file: pandas/core/resample.py | line: 1,268 | params: ["self", "numeric_only", "min_count"] | return: (not specified) | type_hints: true | complexity: 1 | quality: 6.8 | repo: pandas-dev/pandas (47,362 stars) | docstring: numpy | async: false
|
|
autowireResource
|
protected Object autowireResource(BeanFactory factory, LookupElement element, @Nullable String requestingBeanName)
throws NoSuchBeanDefinitionException {
Object resource;
Set<String> autowiredBeanNames;
String name = element.name;
if (factory instanceof AutowireCapableBeanFactory autowireCapableBeanFactory) {
if (this.fallbackToDefaultTypeMatch && element.isDefaultName && !factory.containsBean(name)) {
autowiredBeanNames = new LinkedHashSet<>();
resource = autowireCapableBeanFactory.resolveDependency(
element.getDependencyDescriptor(), requestingBeanName, autowiredBeanNames, null);
if (resource == null) {
throw new NoSuchBeanDefinitionException(element.getLookupType(), "No resolvable resource object");
}
}
else {
resource = autowireCapableBeanFactory.resolveBeanByName(name, element.getDependencyDescriptor());
autowiredBeanNames = Collections.singleton(name);
}
}
else {
resource = factory.getBean(name, element.lookupType);
autowiredBeanNames = Collections.singleton(name);
}
if (factory instanceof ConfigurableBeanFactory configurableBeanFactory) {
for (String autowiredBeanName : autowiredBeanNames) {
if (requestingBeanName != null && configurableBeanFactory.containsBean(autowiredBeanName)) {
configurableBeanFactory.registerDependentBean(autowiredBeanName, requestingBeanName);
}
}
}
return resource;
}
|
Obtain a resource object for the given name and type through autowiring
based on the given factory.
@param factory the factory to autowire against
@param element the descriptor for the annotated field/method
@param requestingBeanName the name of the requesting bean
@return the resource object (never {@code null})
@throws NoSuchBeanDefinitionException if no corresponding target resource found
|
language: java | file: spring-context/src/main/java/org/springframework/context/annotation/CommonAnnotationBeanPostProcessor.java | line: 540 | params: ["factory", "element", "requestingBeanName"] | return: Object | type_hints: true | complexity: 9 | quality: 7.28 | repo: spring-projects/spring-framework (59,386 stars) | docstring: javadoc | async: false
|
fill
|
public static <T, E extends Throwable> T[] fill(final T[] array, final FailableIntFunction<? extends T, E> generator) throws E {
if (array != null && generator != null) {
for (int i = 0; i < array.length; i++) {
array[i] = generator.apply(i);
}
}
return array;
}
|
Fills and returns the given array, using the provided generator supplier to compute each element. Like
{@link Arrays#setAll(Object[], IntFunction)} with exception support.
<p>
If the generator supplier throws an exception, it is relayed to the caller and the array is left in an indeterminate
state.
</p>
@param <T> type of elements of the array.
@param array array to be initialized.
@param generator a function accepting an index and producing the desired value for that position.
@return the input array
@param <E> The kind of thrown exception or error.
@throws E Thrown by the given {@code generator}.
@see Arrays#setAll(Object[], IntFunction)
@since 3.18.0
|
language: java | file: src/main/java/org/apache/commons/lang3/ArrayFill.java | line: 170 | params: ["array", "generator"] | return: (not specified) | type_hints: true | complexity: 4 | quality: 7.92 | repo: apache/commons-lang (2,896 stars) | docstring: javadoc | async: false
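For illustration, a minimal sketch of the index-based fill; the array size and generated values are invented:

    import org.apache.commons.lang3.ArrayFill;

    class ArrayFillSketch {
        public static void main(String[] args) {
            // Each slot receives a value computed from its index.
            String[] labels = ArrayFill.fill(new String[4], i -> "item-" + i);
            // labels is now ["item-0", "item-1", "item-2", "item-3"]
            System.out.println(String.join(", ", labels));
        }
    }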
|
|
check_training_config
|
def check_training_config(self, training_config: dict) -> None:
"""
Check if a training configuration is valid.
:param training_config: training_config
"""
if "InputDataConfig" in training_config:
for channel in training_config["InputDataConfig"]:
if "S3DataSource" in channel["DataSource"]:
self.check_s3_url(channel["DataSource"]["S3DataSource"]["S3Uri"])
|
Check if a training configuration is valid.
:param training_config: training_config
|
language: python | file: providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py | line: 231 | params: ["self", "training_config"] | return: None | type_hints: true | complexity: 4 | quality: 6.4 | repo: apache/airflow (43,597 stars) | docstring: sphinx | async: false
|
get_authorized_pools
|
def get_authorized_pools(
self,
*,
user: T,
method: ResourceMethod = "GET",
session: Session = NEW_SESSION,
) -> set[str]:
"""
Get pools the user has access to.
:param user: the user
:param method: the method to filter on
:param session: the session
"""
stmt = select(Pool.pool, Pool.team_name)
rows = session.execute(stmt).all()
pools_by_team: dict[str | None, set[str]] = defaultdict(set)
for pool_name, team_name in rows:
pools_by_team[team_name].add(pool_name)
pool_names: set[str] = set()
for team_name, team_pool_names in pools_by_team.items():
pool_names.update(
self.filter_authorized_pools(
pool_names=team_pool_names, user=user, method=method, team_name=team_name
)
)
return pool_names
|
Get pools the user has access to.
:param user: the user
:param method: the method to filter on
:param session: the session
|
language: python | file: airflow-core/src/airflow/api_fastapi/auth/managers/base_auth_manager.py | line: 580 | params: ["self", "user", "method", "session"] | return: set[str] | type_hints: true | complexity: 3 | quality: 7.04 | repo: apache/airflow (43,597 stars) | docstring: sphinx | async: false
|
getCovariantTypeResolver
|
private TypeResolver getCovariantTypeResolver() {
TypeResolver resolver = covariantTypeResolver;
if (resolver == null) {
resolver = (covariantTypeResolver = TypeResolver.covariantly(runtimeType));
}
return resolver;
}
|
Returns the type token representing the generic type declaration of {@code cls}. For example:
{@code TypeToken.getGenericType(Iterable.class)} returns {@code Iterable<T>}.
<p>If {@code cls} isn't parameterized and isn't a generic array, the type token of the class is
returned.
|
language: java | file: android/guava/src/com/google/common/reflect/TypeToken.java | line: 1,194 | params: [] | return: TypeResolver | type_hints: true | complexity: 2 | quality: 6.24 | repo: google/guava (51,352 stars) | docstring: javadoc | async: false
|
on
|
public static Splitter on(CharMatcher separatorMatcher) {
checkNotNull(separatorMatcher);
return new Splitter(
(splitter, toSplit) ->
new SplittingIterator(splitter, toSplit) {
@Override
int separatorStart(int start) {
return separatorMatcher.indexIn(toSplit, start);
}
@Override
int separatorEnd(int separatorPosition) {
return separatorPosition + 1;
}
});
}
|
Returns a splitter that considers any single character matched by the given {@code CharMatcher}
to be a separator. For example, {@code
Splitter.on(CharMatcher.anyOf(";,")).split("foo,;bar,quux")} returns an iterable containing
{@code ["foo", "", "bar", "quux"]}.
@param separatorMatcher a {@link CharMatcher} that determines whether a character is a
separator
@return a splitter, with default settings, that uses this matcher
|
language: java | file: android/guava/src/com/google/common/base/Splitter.java | line: 140 | params: ["separatorMatcher"] | return: Splitter | type_hints: true | complexity: 1 | quality: 6.24 | repo: google/guava (51,352 stars) | docstring: javadoc | async: false
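For illustration, a sketch that extends the Javadoc example above with the usual chained modifiers; the input string is invented:

    import com.google.common.base.CharMatcher;
    import com.google.common.base.Splitter;
    import java.util.List;

    class SplitterSketch {
        public static void main(String[] args) {
            List<String> parts = Splitter.on(CharMatcher.anyOf(";,"))
                    .trimResults()
                    .omitEmptyStrings()
                    .splitToList(" foo ,; bar , quux ");
            System.out.println(parts); // [foo, bar, quux]
        }
    }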
|
add_dummy_feature
|
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1)
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[1., 0., 1.],
[1., 1., 0.]])
"""
X = check_array(X, accept_sparse=["csc", "csr", "coo"], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if X.format == "coo":
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif X.format == "csc":
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.full(n_samples, value), X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.full((n_samples, 1), value), X))
|
Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {ndarray, sparse matrix} of shape (n_samples, n_features + 1)
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[1., 0., 1.],
[1., 1., 0.]])
|
language: python | file: sklearn/preprocessing/_data.py | line: 2,593 | params: ["X", "value"] | return: (not specified) | type_hints: false | complexity: 6 | quality: 7.68 | repo: scikit-learn/scikit-learn (64,340 stars) | docstring: numpy | async: false
|
|
postProcessBeanFactory
|
@Override
public void postProcessBeanFactory(ConfigurableListableBeanFactory beanFactory) throws BeansException {
if (this.scopes != null) {
this.scopes.forEach((scopeKey, value) -> {
if (value instanceof Scope scope) {
beanFactory.registerScope(scopeKey, scope);
}
else if (value instanceof Class<?> scopeClass) {
Assert.isAssignable(Scope.class, scopeClass, "Invalid scope class");
beanFactory.registerScope(scopeKey, (Scope) BeanUtils.instantiateClass(scopeClass));
}
else if (value instanceof String scopeClassName) {
Class<?> scopeClass = ClassUtils.resolveClassName(scopeClassName, this.beanClassLoader);
Assert.isAssignable(Scope.class, scopeClass, "Invalid scope class");
beanFactory.registerScope(scopeKey, (Scope) BeanUtils.instantiateClass(scopeClass));
}
else {
throw new IllegalArgumentException("Mapped value [" + value + "] for scope key [" +
scopeKey + "] is not an instance of required type [" + Scope.class.getName() +
"] or a corresponding Class or String value indicating a Scope implementation");
}
});
}
}
|
Add the given scope to this configurer's map of scopes.
@param scopeName the name of the scope
@param scope the scope implementation
@since 4.1.1
|
language: java | file: spring-beans/src/main/java/org/springframework/beans/factory/config/CustomScopeConfigurer.java | line: 96 | params: ["beanFactory"] | return: void | type_hints: true | complexity: 5 | quality: 6.88 | repo: spring-projects/spring-framework (59,386 stars) | docstring: javadoc | async: false
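For illustration, a sketch of how the scopes map consumed by this method is typically populated; the "thread" scope name and the SimpleThreadScope choice are assumptions:

    import org.springframework.beans.factory.config.CustomScopeConfigurer;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    import org.springframework.context.support.SimpleThreadScope;

    @Configuration
    class ScopeConfig {
        @Bean
        static CustomScopeConfigurer customScopeConfigurer() {
            CustomScopeConfigurer configurer = new CustomScopeConfigurer();
            // Registered scopes are applied to the bean factory in postProcessBeanFactory.
            configurer.addScope("thread", new SimpleThreadScope());
            return configurer;
        }
    }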
|
timeoutCallsInFlight
|
private void timeoutCallsInFlight(TimeoutProcessor processor) {
int numTimedOut = 0;
for (Map.Entry<String, Call> entry : callsInFlight.entrySet()) {
Call call = entry.getValue();
String nodeId = entry.getKey();
if (processor.callHasExpired(call)) {
log.info("Disconnecting from {} due to timeout while awaiting {}", nodeId, call);
client.disconnect(nodeId);
numTimedOut++;
// We don't remove anything from the callsInFlight data structure. Because the connection
// has been closed, the calls should be returned by the next client#poll(),
// and handled at that point.
}
}
if (numTimedOut > 0)
log.debug("Timed out {} call(s) in flight.", numTimedOut);
}
|
Time out expired calls that are in flight.
<p>
Calls that are in flight may have been partially or completely sent over the wire. They may
even be in the process of being processed by the remote server. At the moment, our only option
to time them out is to close the entire connection.
@param processor The timeout processor.
|
language: java | file: clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java | line: 1,320 | params: ["processor"] | return: void | type_hints: true | complexity: 3 | quality: 7.2 | repo: apache/kafka (31,560 stars) | docstring: javadoc | async: false
|
_clean_stack_name
|
def _clean_stack_name(stack_name: str) -> str:
"""
Clean up FX node's nn_module_stack metadata string to match the module name hierarchies
Example:
Input: "L['self']._modules['layers']['0']._modules['attention']"
Output: "layers.0.attention"
"""
cleaned = re.sub(r"^L\['self'\]\.?", "", stack_name)
parts = re.findall(r"\['([^']+)'\]", cleaned)
return ".".join(parts) if parts else cleaned
|
Clean up FX node's nn_module_stack metadata string to match the module name hierarchies
Example:
Input: "L['self']._modules['layers']['0']._modules['attention']"
Output: "layers.0.attention"
|
language: python | file: torch/_inductor/fx_passes/graph_view.py | line: 95 | params: ["stack_name"] | return: str | type_hints: true | complexity: 2 | quality: 7.36 | repo: pytorch/pytorch (96,034 stars) | docstring: unknown | async: false
|
get_option
|
def get_option(pat: str) -> Any:
"""
Retrieve the value of the specified option.
This method allows users to query the current value of a given option
in the pandas configuration system. Options control various display,
performance, and behavior-related settings within pandas.
Parameters
----------
pat : str
Regexp which should match a single option.
.. warning::
Partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
Any
The value of the option.
Raises
------
OptionError : if no such option exists
See Also
--------
set_option : Set the value of the specified option or options.
reset_option : Reset one or more options to their default value.
describe_option : Print the description for one or more registered options.
Notes
-----
For all available options, please view the :ref:`User Guide <options.available>`
or use ``pandas.describe_option()``.
Examples
--------
>>> pd.get_option("display.max_columns") # doctest: +SKIP
4
"""
key = _get_single_key(pat)
# walk the nested dict
root, k = _get_root(key)
return root[k]
|
Retrieve the value of the specified option.
This method allows users to query the current value of a given option
in the pandas configuration system. Options control various display,
performance, and behavior-related settings within pandas.
Parameters
----------
pat : str
Regexp which should match a single option.
.. warning::
Partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
Any
The value of the option.
Raises
------
OptionError : if no such option exists
See Also
--------
set_option : Set the value of the specified option or options.
reset_option : Reset one or more options to their default value.
describe_option : Print the description for one or more registered options.
Notes
-----
For all available options, please view the :ref:`User Guide <options.available>`
or use ``pandas.describe_option()``.
Examples
--------
>>> pd.get_option("display.max_columns") # doctest: +SKIP
4
|
language: python | file: pandas/_config/config.py | line: 143 | params: ["pat"] | return: Any | type_hints: true | complexity: 1 | quality: 7.28 | repo: pandas-dev/pandas (47,362 stars) | docstring: numpy | async: false
|