function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
patch_trigger_dag_run_post_body
|
def patch_trigger_dag_run_post_body():
    """
    Post-process the generated `TriggerDAGRunPostBody` model to explicitly include `"logical_date": None`
    in the `to_dict()` output if it was not set.

    Why this is needed:
    - The Airflow API server expects the `logical_date` field to always be present in the request payload,
      even if its value is `null`.
    - By default, the OpenAPI-generated Pydantic model uses `model_dump(exclude_none=True)`, which omits
      any fields set to `None`, including `logical_date`.
    - This causes a 422 error from the server when the field is missing, despite it being marked `nullable`.

    Since we cannot fix this cleanly via OpenAPI spec due to OpenAPI Generator's limitations with
    `nullable` and `anyOf`, we insert an explicit fallback into `to_dict()` after client codegen.

    This patch:
    - Locates the `_dict = self.model_dump(...)` line in `to_dict()`
    - Inserts a conditional to add `"logical_date": None` if it's missing
    """
    # Guard: refuse to run under a non-default Python so the prepared client
    # package stays reproducible across release managers' machines.
    current_python_version = f"{sys.version_info.major}.{sys.version_info.minor}"
    if current_python_version != DEFAULT_PYTHON_MAJOR_MINOR_VERSION:
        get_console().print(
            f"[error]Python version mismatch: current version is {current_python_version}, "
            f"but default version is {DEFAULT_PYTHON_MAJOR_MINOR_VERSION} - this might cause "
            f"reproducibility problems with prepared package.[/]"
        )
        get_console().print(
            f"[info]Please reinstall breeze with uv using Python {DEFAULT_PYTHON_MAJOR_MINOR_VERSION}:[/]"
        )
        get_console().print(
            f"\nuv tool install --python {DEFAULT_PYTHON_MAJOR_MINOR_VERSION} -e ./dev/breeze --force\n"
        )
        sys.exit(1)
    # Location of the generated model file inside the temporary client tree.
    TRIGGER_MODEL_PATH = PYTHON_CLIENT_TMP_DIR / Path(
        "airflow_client/client/models/trigger_dag_run_post_body.py"
    )

    class LogicalDateDictPatch(ast.NodeTransformer):
        # AST transformer that only rewrites the `to_dict` method body.
        def visit_FunctionDef(self, node: ast.FunctionDef) -> ast.FunctionDef:
            if node.name != "to_dict":
                return node
            # Inject this:
            injected = ast.parse('if "logical_date" not in _dict:\n _dict["logical_date"] = None').body
            for idx, stmt in enumerate(node.body):
                # Match exactly `_dict = <expr>.model_dump(...)` so the fallback
                # lands immediately after the dict is first built.
                if (
                    isinstance(stmt, ast.Assign)
                    and isinstance(stmt.targets[0], ast.Name)
                    and stmt.targets[0].id == "_dict"
                    and isinstance(stmt.value, ast.Call)
                    and isinstance(stmt.value.func, ast.Attribute)
                    and stmt.value.func.attr == "model_dump"
                ):
                    node.body.insert(idx + 1, *injected)
                    break
            return node

    source = TRIGGER_MODEL_PATH.read_text(encoding="utf-8")
    tree = ast.parse(source)
    LogicalDateDictPatch().visit(tree)
    ast.fix_missing_locations(tree)
    # NOTE: ast.unparse re-serializes the whole file, dropping the generated
    # file's original formatting and comments — acceptable for generated code.
    TRIGGER_MODEL_PATH.write_text(ast.unparse(tree), encoding="utf-8")
|
Post-process the generated `TriggerDAGRunPostBody` model to explicitly include `"logical_date": None`
in the `to_dict()` output if it was not set.
Why this is needed:
- The Airflow API server expects the `logical_date` field to always be present in the request payload,
even if its value is `null`.
- By default, the OpenAPI-generated Pydantic model uses `model_dump(exclude_none=True)`, which omits
any fields set to `None`, including `logical_date`.
- This causes a 422 error from the server when the field is missing, despite it being marked `nullable`.
Since we cannot fix this cleanly via OpenAPI spec due to OpenAPI Generator's limitations with
`nullable` and `anyOf`, we insert an explicit fallback into `to_dict()` after client codegen.
This patch:
- Locates the `_dict = self.model_dump(...)` line in `to_dict()`
- Inserts a conditional to add `"logical_date": None` if it's missing
|
python
|
dev/breeze/src/airflow_breeze/commands/release_management_commands.py
| 3,594
|
[] | false
| 10
| 6
|
apache/airflow
| 43,597
|
unknown
| false
|
|
toIntegerObject
|
/**
 * Converts a Boolean to an Integer using the convention that zero is false.
 * A {@code null} input is converted to {@code null}.
 *
 * @param bool the Boolean to convert, may be null
 * @return one if Boolean.TRUE, zero if Boolean.FALSE, {@code null} if null
 */
public static Integer toIntegerObject(final Boolean bool) {
    if (bool == null) {
        return null;
    }
    if (bool.booleanValue()) {
        return NumberUtils.INTEGER_ONE;
    }
    return NumberUtils.INTEGER_ZERO;
}
|
Converts a Boolean to an Integer using the convention that
{@code zero} is {@code false}.
<p>{@code null} will be converted to {@code null}.</p>
<pre>
BooleanUtils.toIntegerObject(Boolean.TRUE) = Integer.valueOf(1)
BooleanUtils.toIntegerObject(Boolean.FALSE) = Integer.valueOf(0)
</pre>
@param bool the Boolean to convert
@return one if Boolean.TRUE, zero if Boolean.FALSE, {@code null} if {@code null}
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 976
|
[
"bool"
] |
Integer
| true
| 3
| 7.6
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
forFactoryMethod
|
/**
 * Create a new {@link BeanInstanceSupplier} that resolves arguments for the
 * specified factory method.
 *
 * @param <T> the type of instance supplied
 * @param declaringClass the class that declares the factory method
 * @param methodName the factory method name
 * @param parameterTypes the factory method parameter types
 * @return a new {@link BeanInstanceSupplier} instance
 */
public static <T> BeanInstanceSupplier<T> forFactoryMethod(
		Class<?> declaringClass, String methodName, Class<?>... parameterTypes) {
	Assert.notNull(declaringClass, "'declaringClass' must not be null");
	Assert.hasText(methodName, "'methodName' must not be empty");
	Assert.notNull(parameterTypes, "'parameterTypes' must not be null");
	Assert.noNullElements(parameterTypes, "'parameterTypes' must not contain null elements");
	// Defer the actual method resolution to the lookup object.
	FactoryMethodLookup lookup = new FactoryMethodLookup(declaringClass, methodName, parameterTypes);
	return new BeanInstanceSupplier<>(lookup, null, null, null);
}
|
Create a new {@link BeanInstanceSupplier} that
resolves arguments for the specified factory method.
@param <T> the type of instance supplied
@param declaringClass the class that declares the factory method
@param methodName the factory method name
@param parameterTypes the factory method parameter types
@return a new {@link BeanInstanceSupplier} instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanInstanceSupplier.java
| 131
|
[
"declaringClass",
"methodName"
] | true
| 1
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
standardPeek
|
/**
 * A sensible definition of {@link #peek} in terms of {@link #element}:
 * returns the head of the queue, or {@code null} when {@link #element}
 * signals emptiness by throwing {@link NoSuchElementException}.
 */
protected @Nullable E standardPeek() {
  try {
    return element();
  } catch (NoSuchElementException queueWasEmpty) {
    // An empty queue is not exceptional for peek(): report it as null.
    return null;
  }
}
|
A sensible definition of {@link #peek} in terms of {@link #element}. If you override {@link
#element}, you may wish to override {@link #peek} to forward to this implementation.
@since 7.0
|
java
|
android/guava/src/com/google/common/collect/ForwardingQueue.java
| 107
|
[] |
E
| true
| 2
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
create
|
/**
 * Create a {@link MethodDescriptor} for the supplied bean class and method name.
 * <p>A fully qualified name such as {@code com.example.Foo.bar} is split into
 * its class and method parts; the declaring class is loaded via the bean
 * class's ClassLoader when it differs from the bean class itself. A simple
 * method name resolves against the supplied bean class directly.
 *
 * @param beanName the bean name in the factory (for debugging purposes)
 * @param beanClass the bean class
 * @param methodName the simple or fully qualified method name
 * @return a new {@code MethodDescriptor}; never {@code null}
 */
static MethodDescriptor create(String beanName, Class<?> beanClass, String methodName) {
	try {
		int separator = methodName.lastIndexOf('.');
		if (separator <= 0) {
			// Simple (unqualified) method name: declared on the bean class.
			return new MethodDescriptor(beanClass, methodName);
		}
		String className = methodName.substring(0, separator);
		String simpleName = methodName.substring(separator + 1);
		Class<?> declaringClass = (beanClass.getName().equals(className) ? beanClass :
				ClassUtils.forName(className, beanClass.getClassLoader()));
		return new MethodDescriptor(declaringClass, simpleName);
	}
	catch (Exception | LinkageError ex) {
		throw new BeanDefinitionValidationException(
				"Could not create MethodDescriptor for method '%s' on bean with name '%s': %s"
						.formatted(methodName, beanName, ex.getMessage()));
	}
}
|
Create a {@link MethodDescriptor} for the supplied bean class and method name.
<p>The supplied {@code methodName} may be a {@linkplain Method#getName()
simple method name} or a {@linkplain ClassUtils#getQualifiedMethodName(Method)
qualified method name}.
<p>If the method name is fully qualified, this utility will parse the
method name and its declaring class from the qualified method name and then
attempt to load the method's declaring class using the {@link ClassLoader}
of the supplied {@code beanClass}. Otherwise, the returned descriptor will
reference the supplied {@code beanClass} and {@code methodName}.
@param beanName the bean name in the factory (for debugging purposes)
@param beanClass the bean class
@param methodName the name of the method
@return a new {@code MethodDescriptor}; never {@code null}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/MethodDescriptor.java
| 51
|
[
"beanName",
"beanClass",
"methodName"
] |
MethodDescriptor
| true
| 4
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
customizeBeanRegistrationCodeFragments
|
/**
 * Customize the {@link BeanRegistrationCodeFragments} used to generate the
 * bean registration code. Implementations may wrap or replace the supplied
 * fragments; this default implementation returns them unchanged.
 *
 * @param generationContext the generation context
 * @param codeFragments the existing code fragments
 * @return the code fragments to use, may be the original instance or a wrapper
 */
default BeanRegistrationCodeFragments customizeBeanRegistrationCodeFragments(
		GenerationContext generationContext, BeanRegistrationCodeFragments codeFragments) {
	// No customization by default.
	return codeFragments;
}
|
Customize the {@link BeanRegistrationCodeFragments} that will be used to
generate the bean registration code. Custom code fragments can be used if
default code generation isn't suitable.
@param generationContext the generation context
@param codeFragments the existing code fragments
@return the code fragments to use, may be the original instance or a wrapper
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanRegistrationAotContribution.java
| 46
|
[
"generationContext",
"codeFragments"
] |
BeanRegistrationCodeFragments
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
resolveEntity
|
/**
 * Resolves an entity by delegating on the system-id suffix: {@code *.dtd}
 * goes to the DTD resolver, {@code *.xsd} to the schema resolver. Any other
 * system id (including {@code null}) returns {@code null} so the parser
 * falls back to its default resolution behavior.
 */
@Override
public @Nullable InputSource resolveEntity(@Nullable String publicId, @Nullable String systemId)
		throws SAXException, IOException {
	if (systemId == null) {
		// Nothing to dispatch on: let the parser handle it.
		return null;
	}
	if (systemId.endsWith(DTD_SUFFIX)) {
		return this.dtdResolver.resolveEntity(publicId, systemId);
	}
	if (systemId.endsWith(XSD_SUFFIX)) {
		return this.schemaResolver.resolveEntity(publicId, systemId);
	}
	// Fall back to the parser's default behavior.
	return null;
}
|
Create a new DelegatingEntityResolver that delegates to
the given {@link EntityResolver EntityResolvers}.
@param dtdResolver the EntityResolver to resolve DTDs with
@param schemaResolver the EntityResolver to resolve XML schemas with
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/DelegatingEntityResolver.java
| 80
|
[
"publicId",
"systemId"
] |
InputSource
| true
| 4
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
toString
|
/**
 * Returns a string representation of {@code x} for the given radix, where
 * {@code x} is treated as unsigned.
 *
 * @param x the value to convert to a string
 * @param radix the radix to use while working with {@code x}
 * @throws IllegalArgumentException if {@code radix} is outside
 *     [{@link Character#MIN_RADIX}, {@link Character#MAX_RADIX}]
 */
public static String toString(long x, int radix) {
  checkArgument(
      radix >= Character.MIN_RADIX && radix <= Character.MAX_RADIX,
      "radix (%s) must be between Character.MIN_RADIX and Character.MAX_RADIX",
      radix);
  if (x == 0) {
    // Simply return "0"
    return "0";
  } else if (x > 0) {
    // Nonnegative as a signed long: signed and unsigned representations agree.
    return Long.toString(x, radix);
  } else {
    // x is negative as a signed long, i.e. the unsigned value is >= 2^63.
    // 64 chars covers the worst case (radix 2 needs at most 64 digits).
    char[] buf = new char[64];
    int i = buf.length;
    if ((radix & (radix - 1)) == 0) {
      // Radix is a power of two so we can avoid division.
      int shift = Integer.numberOfTrailingZeros(radix);
      int mask = radix - 1;
      do {
        buf[--i] = Character.forDigit(((int) x) & mask, radix);
        x >>>= shift;
      } while (x != 0);
    } else {
      // Separate off the last digit using unsigned division. That will leave
      // a number that is nonnegative as a signed integer.
      long quotient;
      if ((radix & 1) == 0) {
        // Fast path for the usual case where the radix is even.
        quotient = (x >>> 1) / (radix >>> 1);
      } else {
        quotient = divide(x, radix);
      }
      long rem = x - quotient * radix;
      buf[--i] = Character.forDigit((int) rem, radix);
      x = quotient;
      // Simple modulo/division approach
      while (x > 0) {
        buf[--i] = Character.forDigit((int) (x % radix), radix);
        x /= radix;
      }
    }
    // Generate string
    return new String(buf, i, buf.length - i);
  }
}
|
Returns a string representation of {@code x} for the given radix, where {@code x} is treated as
unsigned.
<p><b>Java 8+ users:</b> use {@link Long#toUnsignedString(long, int)} instead.
@param x the value to convert to a string.
@param radix the radix to use while working with {@code x}
@throws IllegalArgumentException if {@code radix} is not between {@link Character#MIN_RADIX}
and {@link Character#MAX_RADIX}.
|
java
|
android/guava/src/com/google/common/primitives/UnsignedLongs.java
| 459
|
[
"x",
"radix"
] |
String
| true
| 7
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
getTargetShadowMatch
|
/**
 * Resolves the most specific target method for the given method / target
 * class pair and returns the AspectJ {@code ShadowMatch} for it.
 * <p>When the resolved method is declared on an interface other than the
 * target class itself and the pointcut expression textually mentions the
 * method name, a composite interface over all implemented interfaces is
 * attempted so sub-interface matches (in particular for proxy classes) are
 * considered as well.
 */
private ShadowMatch getTargetShadowMatch(Method method, Class<?> targetClass) {
	Method targetMethod = AopUtils.getMostSpecificMethod(method, targetClass);
	// Only attempt the composite-interface refinement when the pointcut text
	// actually references ".<methodName>(" — a cheap pre-filter.
	if (targetMethod.getDeclaringClass().isInterface() && targetMethod.getDeclaringClass() != targetClass &&
			obtainPointcutExpression().getPointcutExpression().contains("." + targetMethod.getName() + "(")) {
		// Try to build the most specific interface possible for inherited methods to be
		// considered for sub-interface matches as well, in particular for proxy classes.
		// Note: AspectJ is only going to take Method.getDeclaringClass() into account.
		Set<Class<?>> ifcs = ClassUtils.getAllInterfacesForClassAsSet(targetClass);
		if (ifcs.size() > 1) {
			try {
				Class<?> compositeInterface = ClassUtils.createCompositeInterface(
						ClassUtils.toClassArray(ifcs), targetClass.getClassLoader());
				targetMethod = ClassUtils.getMostSpecificMethod(targetMethod, compositeInterface);
			}
			catch (IllegalArgumentException ex) {
				// Implemented interfaces probably expose conflicting method signatures...
				// Proceed with original target method.
			}
		}
	}
	return getShadowMatch(targetMethod, method);
}
|
Get a new pointcut expression based on a target class's loader rather than the default.
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJExpressionPointcut.java
| 442
|
[
"method",
"targetClass"
] |
ShadowMatch
| true
| 6
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
supportsEventType
|
/**
 * This listener reacts to environment-prepared, application-prepared and
 * application-failed events (including their subclasses).
 */
@Override
public boolean supportsEventType(Class<? extends ApplicationEvent> eventType) {
	if (ApplicationEnvironmentPreparedEvent.class.isAssignableFrom(eventType)) {
		return true;
	}
	if (ApplicationPreparedEvent.class.isAssignableFrom(eventType)) {
		return true;
	}
	return ApplicationFailedEvent.class.isAssignableFrom(eventType);
}
|
Factory method that creates an {@link EnvironmentPostProcessorApplicationListener}
with a specific {@link EnvironmentPostProcessorsFactory}.
@param postProcessorsFactory the environment post processor factory
@return an {@link EnvironmentPostProcessorApplicationListener} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/support/EnvironmentPostProcessorApplicationListener.java
| 110
|
[
"eventType"
] | true
| 3
| 7.12
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
append
|
/**
 * Appends the decimal string form of a long value, as produced by
 * {@code String.valueOf}.
 *
 * @param value the value to append
 * @return {@code this} instance
 */
public StrBuilder append(final long value) {
    final String text = String.valueOf(value);
    return append(text);
}
|
Appends a long value to the string builder using {@code String.valueOf}.
@param value the value to append
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 547
|
[
"value"
] |
StrBuilder
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
getThrowStatementOwner
|
/**
 * Walks upward from a throw statement to the node that "owns" it: the nearest
 * enclosing function block or the source file, or — when the throw sits inside
 * the try block of a try-statement that has a catch clause — that try block.
 */
function getThrowStatementOwner(throwStatement: ThrowStatement): Node | undefined {
    for (let node: Node = throwStatement; node.parent; node = node.parent) {
        const parent = node.parent;
        if (isFunctionBlock(parent) || parent.kind === SyntaxKind.SourceFile) {
            return parent;
        }
        // A throw-statement is only owned by a try-statement if the try-statement
        // has a catch clause, and if the throw-statement occurs within the try block.
        if (isTryStatement(parent) && parent.tryBlock === node && parent.catchClause) {
            return node;
        }
    }
    return undefined;
}
|
For lack of a better name, this function takes a throw statement and returns the
nearest ancestor that is a try-block (whose try statement has a catch clause),
function-block, or source file.
|
typescript
|
src/services/documentHighlights.ts
| 225
|
[
"throwStatement"
] | true
| 7
| 6
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toObject
|
/**
 * Boxes an array of primitive ints into an {@link Integer} array.
 * <p>A {@code null} input yields {@code null}; an empty input yields the
 * shared empty array constant.</p>
 *
 * @param array an {@code int} array, may be null
 * @return an {@link Integer} array, or {@code null} for null input
 */
public static Integer[] toObject(final int[] array) {
    if (array == null) {
        return null;
    }
    return array.length == 0
            ? EMPTY_INTEGER_OBJECT_ARRAY
            : setAll(new Integer[array.length], i -> Integer.valueOf(array[i]));
}
|
Converts an array of primitive ints to objects.
<p>This method returns {@code null} for a {@code null} input array.</p>
@param array an {@code int} array.
@return an {@link Integer} array, {@code null} if null array input.
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 8,762
|
[
"array"
] | true
| 3
| 8.24
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
equals
|
/**
 * Two instances are equal when they are the exact same class and their
 * {@code keys} arrays are deeply equal.
 */
@Override
public boolean equals(final Object obj) {
    if (this == obj) {
        return true;
    }
    // Reject null and any different concrete class in one guard.
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    return Arrays.deepEquals(keys, ((ArrayKey) obj).keys);
}
|
Constructs an instance of {@link MultipartKey} to hold the specified objects.
@param keys the set of objects that make up the key. Each key may be null.
|
java
|
src/main/java/org/apache/commons/lang3/time/AbstractFormatCache.java
| 58
|
[
"obj"
] | true
| 4
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
append
|
/**
 * Appends a portion of a char array to the string builder.
 * Appending a null array calls {@link #appendNull()}.
 *
 * @param chars the char array to append, null calls {@link #appendNull()}
 * @param startIndex the start index, inclusive, must be valid
 * @param length the number of characters to append, must be valid
 * @return {@code this} instance
 * @throws StringIndexOutOfBoundsException if startIndex or length is invalid
 */
public StrBuilder append(final char[] chars, final int startIndex, final int length) {
    if (chars == null) {
        return appendNull();
    }
    if (startIndex < 0 || startIndex > chars.length) {
        // Bug fix: report the offending startIndex (the original message
        // mistakenly printed the length value here).
        throw new StringIndexOutOfBoundsException("Invalid startIndex: " + startIndex);
    }
    if (length < 0 || startIndex + length > chars.length) {
        throw new StringIndexOutOfBoundsException("Invalid length: " + length);
    }
    if (length > 0) {
        final int len = length();
        // Grow once, then bulk-copy the requested range.
        ensureCapacity(len + length);
        System.arraycopy(chars, startIndex, buffer, len, length);
        size += length;
    }
    return this;
}
|
Appends a char array to the string builder.
Appending null will call {@link #appendNull()}.
@param chars the char array to append
@param startIndex the start index, inclusive, must be valid
@param length the length to append, must be valid
@return {@code this} instance.
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 390
|
[
"chars",
"startIndex",
"length"
] |
StrBuilder
| true
| 7
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
hasIndexedElement
|
/**
 * Returns {@code true} if at least one element of this name is indexed.
 *
 * @return whether the name contains one or more indexed elements
 * @since 2.2.10
 */
public boolean hasIndexedElement() {
	for (int index = 0; index < getNumberOfElements(); index++) {
		if (isIndexed(index)) {
			// First indexed element is enough; stop scanning.
			return true;
		}
	}
	return false;
}
|
Return {@code true} if any element in the name is indexed.
@return if the element has one or more indexed elements
@since 2.2.10
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
| 106
|
[] | true
| 3
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
create_rot_n
|
def create_rot_n(n: int) -> list[Instruction]:
    """
    Return instructions that rotate TOS into the n-th stack position.

    On Python >= 3.11 any rotation is expressed as a descending sequence of
    SWAP instructions. On older interpreters a single ROT_TWO/ROT_THREE/
    ROT_FOUR is used when possible, falling back to ROT_N. For n <= 1 no
    rotation is needed and an empty list is returned.
    """
    if n <= 1:
        # Rotating by one position (or none) is the identity.
        return []
    if sys.version_info >= (3, 11):
        # rotate n == SWAP n, SWAP n-1, ..., SWAP 2
        return [create_instruction("SWAP", arg=i) for i in range(n, 1, -1)]
    suffixes = ["TWO", "THREE", "FOUR"]
    if n - 2 < len(suffixes):
        # Dedicated single-opcode rotations for n in {2, 3, 4}.
        return [create_instruction("ROT_" + suffixes[n - 2])]
    return [create_instruction("ROT_N", arg=n)]
|
Returns a "simple" sequence of instructions that rotates TOS to the n-th
position in the stack. For Python < 3.11, returns a single ROT_*
instruction. If no such instruction exists, an error is raised and the
caller is expected to generate an equivalent sequence of instructions.
For Python >= 3.11, any rotation can be expressed as a simple sequence of
swaps.
|
python
|
torch/_dynamo/bytecode_transformation.py
| 233
|
[
"n"
] |
list[Instruction]
| true
| 4
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
addFilter
|
/**
 * Splices `filter` into every vector selector of `query`, returning the
 * rewritten query string. For each matched selector: if a label with the same
 * name and value already exists but with a different operator, that label is
 * replaced with `filter`; otherwise `filter` is appended. Text around each
 * selector is preserved verbatim.
 * NOTE(review): assumes `vectorSelectorPositions` is ordered by position and
 * non-overlapping — confirm at the caller. Also note `match.query.labels` is
 * mutated in place.
 */
function addFilter(
  query: string,
  vectorSelectorPositions: VectorSelectorPosition[],
  filter: QueryBuilderLabelFilter
): string {
  let newQuery = '';
  let prev = 0;
  for (let i = 0; i < vectorSelectorPositions.length; i++) {
    // This is basically just doing splice on a string for each matched vector selector.
    const match = vectorSelectorPositions[i];
    const isLast = i === vectorSelectorPositions.length - 1;
    // Untouched text before this selector; trailing text only after the last one.
    const start = query.substring(prev, match.from);
    const end = isLast ? query.substring(match.to) : '';
    const labelToMatch = labelExists(match.query.labels, filter);
    if (labelToMatch) {
      // if label exists, check the operator, if it is different, update it.
      // We don't want to add duplicate labels.
      if (labelToMatch.op !== filter.op) {
        match.query.labels = match.query.labels.map((label) =>
          label.label === filter.label && label.value === filter.value ? filter : label
        );
      }
    } else {
      // label does not exist, add as is.
      match.query.labels.push(filter);
    }
    // Re-render the (possibly mutated) selector and stitch the pieces together.
    const newLabels = renderQuery(match.query);
    newQuery += start + newLabels + end;
    prev = match.to;
  }
  return newQuery;
}
|
Parse the string and get all VectorSelector positions in the query together with parsed representation of the vector
selector.
@param query
|
typescript
|
packages/grafana-prometheus/src/add_label_to_query.ts
| 65
|
[
"query",
"vectorSelectorPositions",
"filter"
] | true
| 8
| 6.56
|
grafana/grafana
| 71,362
|
jsdoc
| false
|
|
_vander_nd
|
def _vander_nd(vander_fs, points, degrees):
    r"""
    Build an N-dimensional pseudo-Vandermonde tensor from 1-d matrices.

    Each axis ``k`` contributes ``vander_fs[k](points[k], degrees[k])``; the
    results are broadcast against one another (each gets its own trailing
    output axis) and multiplied elementwise.

    Parameters
    ----------
    vander_fs : Sequence[function(array_like, int) -> ndarray]
        The 1-d vander function for each axis, such as ``polyvander``.
    points : Sequence[array_like]
        Point-coordinate arrays, all of the same shape; scalars become 1-D.
        Must be the same length as `vander_fs`.
    degrees : Sequence[int]
        Maximum degree (inclusive) per axis; same length as `vander_fs`.

    Returns
    -------
    vander_nd : ndarray
        Array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.
    """  # noqa: E501
    n_dims = len(vander_fs)
    if len(points) != n_dims:
        raise ValueError(
            f"Expected {n_dims} dimensions of sample points, got {len(points)}")
    if len(degrees) != n_dims:
        raise ValueError(
            f"Expected {n_dims} dimensions of degrees, got {len(degrees)}")
    if n_dims == 0:
        raise ValueError("Unable to guess a dtype or shape when no points are given")

    # Stack the coordinate arrays to a common shape; "+ 0.0" promotes the
    # dtype to at least float64 (or complex128 for complex inputs).
    points = tuple(np.asarray(tuple(points)) + 0.0)

    # One 1-d Vandermonde matrix per axis, each sliced so its degree axis
    # occupies an independent trailing axis of the final result.
    per_axis = (
        vander_fs[k](points[k], degrees[k])[(...,) + _nth_slice(k, n_dims)]
        for k in range(n_dims)
    )
    # n_dims >= 1 was verified above, so reduce needs no `initial`.
    return functools.reduce(operator.mul, per_axis)
|
r"""
A generalization of the Vandermonde matrix for N dimensions
The result is built by combining the results of 1d Vandermonde matrices,
.. math::
W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]}
where
.. math::
N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\
M &= \texttt{points[k].ndim} \\
V_k &= \texttt{vander\_fs[k]} \\
x_k &= \texttt{points[k]} \\
0 \le j_k &\le \texttt{degrees[k]}
Expanding the one-dimensional :math:`V_k` functions gives:
.. math::
W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])}
where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along
dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`.
Parameters
----------
vander_fs : Sequence[function(array_like, int) -> ndarray]
The 1d vander function to use for each axis, such as ``polyvander``
points : Sequence[array_like]
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
This must be the same length as `vander_fs`.
degrees : Sequence[int]
The maximum degree (inclusive) to use for each axis.
This must be the same length as `vander_fs`.
Returns
-------
vander_nd : ndarray
An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.
|
python
|
numpy/polynomial/polyutils.py
| 364
|
[
"vander_fs",
"points",
"degrees"
] | false
| 4
| 6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
replaceIgnoreCase
|
/**
 * Case-insensitively replaces a String with another String inside a larger
 * String, for the first {@code max} occurrences of the search String.
 * A {@code null} reference passed to this method is a no-op.
 *
 * @param text text to search and replace in, may be null
 * @param searchString the String to search for (case-insensitive), may be null
 * @param replacement the String to replace it with, may be null
 * @param max maximum number of values to replace, or {@code -1} for no maximum
 * @return the text with any replacements processed, {@code null} if null input
 * @since 3.5
 * @deprecated Use {@link Strings#replace(String, String, String, int) Strings.CI.replace(String, String, String, int)}.
 */
@Deprecated
public static String replaceIgnoreCase(final String text, final String searchString, final String replacement, final int max) {
    // Deprecated facade over the case-insensitive Strings singleton.
    return Strings.CI.replace(text, searchString, replacement, max);
}
|
Case insensitively replaces a String with another String inside a larger String, for the first {@code max} values of the search String.
<p>
A {@code null} reference passed to this method is a no-op.
</p>
<pre>
StringUtils.replaceIgnoreCase(null, *, *, *) = null
StringUtils.replaceIgnoreCase("", *, *, *) = ""
StringUtils.replaceIgnoreCase("any", null, *, *) = "any"
StringUtils.replaceIgnoreCase("any", *, null, *) = "any"
StringUtils.replaceIgnoreCase("any", "", *, *) = "any"
StringUtils.replaceIgnoreCase("any", *, *, 0) = "any"
StringUtils.replaceIgnoreCase("abaa", "a", null, -1) = "abaa"
StringUtils.replaceIgnoreCase("abaa", "a", "", -1) = "b"
StringUtils.replaceIgnoreCase("abaa", "a", "z", 0) = "abaa"
StringUtils.replaceIgnoreCase("abaa", "A", "z", 1) = "zbaa"
StringUtils.replaceIgnoreCase("abAa", "a", "z", 2) = "zbza"
StringUtils.replaceIgnoreCase("abAa", "a", "z", -1) = "zbzz"
</pre>
@param text text to search and replace in, may be null.
@param searchString the String to search for (case-insensitive), may be null.
@param replacement the String to replace it with, may be null.
@param max maximum number of values to replace, or {@code -1} if no maximum.
@return the text with any replacements processed, {@code null} if null String input.
@since 3.5
@deprecated Use {@link Strings#replace(String, String, String, int) Strings.CI.replace(String, String, String, int)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 6,665
|
[
"text",
"searchString",
"replacement",
"max"
] |
String
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
set_table_attributes
|
def set_table_attributes(self, attributes: str) -> Styler:
    """
    Set the attribute string rendered on the ``<table>`` HTML element.

    The supplied text is emitted inside the ``<table>`` tag in addition to
    the automatically generated ``id`` attribute.

    Parameters
    ----------
    attributes : str
        Raw attribute text for the ``<table>`` element,
        e.g. ``'class="pure-table"'``.

    Returns
    -------
    Styler
        This instance, enabling method chaining.

    See Also
    --------
    Styler.set_table_styles : Set the table styles included within the
        ``<style>`` HTML element.
    Styler.set_td_classes : Set the DataFrame of strings added to the
        ``class`` attribute of ``<td>`` HTML elements.

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_table_attributes('class="pure-table"')  # doctest: +SKIP
    # ... <table class="pure-table"> ...
    """
    self.table_attributes = attributes
    return self
|
Set the table attributes added to the ``<table>`` HTML element.
These are items in addition to automatic (by default) ``id`` attribute.
Parameters
----------
attributes : str
Table attributes to be added to the ``<table>`` HTML element.
Returns
-------
Styler
Instance of class with specified table attributes set.
See Also
--------
Styler.set_table_styles: Set the table styles included within the ``<style>``
HTML element.
Styler.set_td_classes: Set the DataFrame of strings added to the ``class``
attribute of ``<td>`` HTML elements.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"') # doctest: +SKIP
# ... <table class="pure-table"> ...
|
python
|
pandas/io/formats/style.py
| 2,197
|
[
"self",
"attributes"
] |
Styler
| true
| 1
| 6.96
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
throwIfNot
|
/**
 * Throws a {@link ConfigDataResourceNotFoundException} for the given
 * resource when the supplied check is {@code false}.
 *
 * @param resource the config data resource to report
 * @param check the condition that must hold
 */
private static void throwIfNot(ConfigDataResource resource, boolean check) {
	if (check) {
		return;
	}
	throw new ConfigDataResourceNotFoundException(resource);
}
|
Throw a {@link ConfigDataNotFoundException} if the specified {@link Resource} does
not exist.
@param resource the config data resource
@param resourceToCheck the resource to check
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataResourceNotFoundException.java
| 144
|
[
"resource",
"check"
] |
void
| true
| 2
| 6.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
copyPropertySources
|
/**
 * Copies property sources from {@code source} into {@code target}: the
 * target's existing sources are removed first, and sources whose names are
 * servlet-specific are skipped during the copy.
 */
private void copyPropertySources(ConfigurableEnvironment source, ConfigurableEnvironment target) {
	boolean servletTarget = isServletEnvironment(target.getClass(), this.classLoader);
	removePropertySources(target.getPropertySources(), servletTarget);
	for (PropertySource<?> candidate : source.getPropertySources()) {
		// Servlet-only sources must not leak into the converted environment.
		if (!SERVLET_ENVIRONMENT_SOURCE_NAMES.contains(candidate.getName())) {
			target.getPropertySources().addLast(candidate);
		}
	}
}
|
Converts the given {@code environment} to the given {@link StandardEnvironment}
type. If the environment is already of the same type, no conversion is performed
and it is returned unchanged.
@param environment the Environment to convert
@param type the type to convert the Environment to
@return the converted Environment
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/EnvironmentConverter.java
| 101
|
[
"source",
"target"
] |
void
| true
| 2
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
extractSource
|
/**
 * Pass-through implementation: simply returns the supplied
 * {@code sourceCandidate} as-is.
 *
 * @param sourceCandidate the source metadata
 * @return the supplied {@code sourceCandidate}, unchanged
 */
@Override
public Object extractSource(Object sourceCandidate, @Nullable Resource definingResource) {
	// No extraction performed; the metadata object itself serves as the source.
	return sourceCandidate;
}
|
Simply returns the supplied {@code sourceCandidate} as-is.
@param sourceCandidate the source metadata
@return the supplied {@code sourceCandidate}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/parsing/PassThroughSourceExtractor.java
| 44
|
[
"sourceCandidate",
"definingResource"
] |
Object
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_addsub_object_array
|
def _addsub_object_array(self, other: npt.NDArray[np.object_], op) -> np.ndarray:
    """
    Apply ``op`` between self and an array-like of DateOffset objects.

    Parameters
    ----------
    other : np.ndarray[object]
    op : {operator.add, operator.sub}

    Returns
    -------
    np.ndarray[object]
        Except in the fastpath case with length 1, where we operate on the
        contained scalar directly.
    """
    assert op in [operator.add, operator.sub]

    # Fastpath: a length-1 `other` against a 1-D self broadcasts
    # unambiguously, so operate with the contained scalar.
    if self.ndim == 1 and len(other) == 1:
        return op(self, other[0])

    if get_option("performance_warnings"):
        # Object-dtype arithmetic falls back to an elementwise Python loop.
        warnings.warn(
            "Adding/subtracting object-dtype array to "
            f"{type(self).__name__} not vectorized.",
            PerformanceWarning,
            stacklevel=find_stack_level(),
        )

    # Caller is responsible for broadcasting if necessary
    assert self.shape == other.shape, (self.shape, other.shape)
    return op(self.astype("O"), np.asarray(other))
|
Add or subtract array-like of DateOffset objects
Parameters
----------
other : np.ndarray[object]
op : {operator.add, operator.sub}
Returns
-------
np.ndarray[object]
Except in fastpath case with length 1 where we operate on the
contained scalar.
|
python
|
pandas/core/arrays/datetimelike.py
| 1,343
|
[
"self",
"other",
"op"
] |
np.ndarray
| true
| 4
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
withHashes
|
public StandardStackTracePrinter withHashes(boolean hashes) {
return withHashes((!hashes) ? null : DEFAULT_FRAME_HASHER);
}
|
Return a new {@link StandardStackTracePrinter} from this one that changes if hashes
should be generated and printed for each stacktrace.
@param hashes if hashes should be added
@return a new {@link StandardStackTracePrinter} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/StandardStackTracePrinter.java
| 272
|
[
"hashes"
] |
StandardStackTracePrinter
| true
| 2
| 7.36
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
addAll
|
@CanIgnoreReturnValue
public static <T extends @Nullable Object> boolean addAll(
Collection<T> addTo, Iterator<? extends T> iterator) {
checkNotNull(addTo);
checkNotNull(iterator);
boolean wasModified = false;
while (iterator.hasNext()) {
wasModified |= addTo.add(iterator.next());
}
return wasModified;
}
|
Adds all elements in {@code iterator} to {@code collection}. The iterator will be left
exhausted: its {@code hasNext()} method will return {@code false}.
@return {@code true} if {@code collection} was modified as a result of this operation
|
java
|
android/guava/src/com/google/common/collect/Iterators.java
| 363
|
[
"addTo",
"iterator"
] | true
| 2
| 7.76
|
google/guava
| 51,352
|
javadoc
| false
|
|
createMimeMessage
|
MimeMessage createMimeMessage(InputStream contentStream) throws MailException;
|
Create a new JavaMail MimeMessage for the underlying JavaMail Session
of this sender, using the given input stream as the message source.
@param contentStream the raw MIME input stream for the message
@return the new MimeMessage instance
@throws org.springframework.mail.MailParseException
in case of message creation failure
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/JavaMailSender.java
| 88
|
[
"contentStream"
] |
MimeMessage
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
removeAdvisor
|
@Override
public boolean removeAdvisor(Advisor advisor) {
int index = indexOf(advisor);
if (index == -1) {
return false;
}
else {
removeAdvisor(index);
return true;
}
}
|
Remove a proxied interface.
<p>Does nothing if the given interface isn't proxied.
@param ifc the interface to remove from the proxy
@return {@code true} if the interface was removed; {@code false}
if the interface was not found and hence could not be removed
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
| 315
|
[
"advisor"
] | true
| 2
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
findImplementation
|
private static ErrorOnUnknown findImplementation() {
ErrorOnUnknown best = new ErrorOnUnknown() {
@Override
public String errorMessage(String parserName, String unknownField, Iterable<String> candidates) {
return "[" + parserName + "] unknown field [" + unknownField + "]";
}
@Override
public int priority() {
return Integer.MIN_VALUE;
}
};
for (ErrorOnUnknown c : ServiceLoader.load(ErrorOnUnknown.class)) {
if (best.priority() < c.priority()) {
best = c;
}
}
return best;
}
|
Priority that this error message handler should be used.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/ErrorOnUnknown.java
| 38
|
[] |
ErrorOnUnknown
| true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
fit
|
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features, or distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
self : object
Returns a fitted instance of self.
"""
X = validate_data(self, X, accept_sparse="csr")
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# Calculate neighborhood for all samples. This leaves the original
# point in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i. While True, its useless information)
if self.metric == "precomputed" and sparse.issparse(X):
# set the diagonal to explicit values, as a point is its own
# neighbor
X = X.copy() # copy to avoid in-place modification
with warnings.catch_warnings():
warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning)
X.setdiag(X.diagonal())
neighbors_model = NearestNeighbors(
radius=self.eps,
algorithm=self.algorithm,
leaf_size=self.leaf_size,
metric=self.metric,
metric_params=self.metric_params,
p=self.p,
n_jobs=self.n_jobs,
)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
else:
n_neighbors = np.array(
[np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]
)
# Initially, all samples are noise.
labels = np.full(X.shape[0], -1, dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
self.core_sample_indices_ = np.where(core_samples)[0]
self.labels_ = labels
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
|
Perform DBSCAN clustering from features, or distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
self : object
Returns a fitted instance of self.
|
python
|
sklearn/cluster/_dbscan.py
| 397
|
[
"self",
"X",
"y",
"sample_weight"
] | false
| 8
| 6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
ToggleInspectedElement
|
function ToggleInspectedElement({
dispatch,
state,
orientation,
}: {
dispatch: LayoutDispatch,
state: LayoutState,
orientation: 'horizontal' | 'vertical',
}) {
let iconType: IconType;
if (orientation === 'horizontal') {
iconType = state.inspectedElementHidden
? 'panel-right-open'
: 'panel-right-close';
} else {
iconType = state.inspectedElementHidden
? 'panel-bottom-open'
: 'panel-bottom-close';
}
return (
<Button
className={styles.ToggleInspectedElement}
data-orientation={orientation}
onClick={() =>
dispatch({
type: 'ACTION_SET_INSPECTED_ELEMENT_TOGGLE',
payload: null,
})
}
title={
state.inspectedElementHidden
? 'Show Inspected Element'
: 'Hide Inspected Element'
}>
<ButtonIcon type={iconType} />
</Button>
);
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-devtools-shared/src/devtools/views/SuspenseTab/SuspenseTab.js
| 123
|
[] | false
| 6
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
postProcessBeanFactory
|
@Override
protected void postProcessBeanFactory(ConfigurableListableBeanFactory beanFactory) {
super.postProcessBeanFactory(beanFactory);
if (!ObjectUtils.isEmpty(this.basePackages)) {
this.scanner.scan(this.basePackages);
}
if (!this.annotatedClasses.isEmpty()) {
this.reader.register(ClassUtils.toClassArray(this.annotatedClasses));
}
}
|
Perform a scan within the specified base packages. Note that {@link #refresh()}
must be called in order for the context to fully process the new class.
@param basePackages the packages to check for annotated classes
@see #register(Class...)
@see #refresh()
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/web/context/servlet/AnnotationConfigServletWebApplicationContext.java
| 202
|
[
"beanFactory"
] |
void
| true
| 3
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
range
|
function range(from: number, to: number): number[] {
return _range(from, to)
}
|
Create a list of numbers from `from` to `to` (both inclusive).
@param from
@param to
@returns
|
typescript
|
helpers/blaze/range.ts
| 21
|
[
"from",
"to"
] | true
| 1
| 6.8
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
synchronizedNavigableSet
|
@GwtIncompatible // NavigableSet
@J2ktIncompatible // Synchronized
public static <E extends @Nullable Object> NavigableSet<E> synchronizedNavigableSet(
NavigableSet<E> navigableSet) {
return Synchronized.navigableSet(navigableSet);
}
|
Returns a synchronized (thread-safe) navigable set backed by the specified navigable set. In
order to guarantee serial access, it is critical that <b>all</b> access to the backing
navigable set is accomplished through the returned navigable set (or its views).
<p>It is imperative that the user manually synchronize on the returned sorted set when
iterating over it or any of its {@code descendingSet}, {@code subSet}, {@code headSet}, or
{@code tailSet} views.
{@snippet :
NavigableSet<E> set = synchronizedNavigableSet(new TreeSet<E>());
...
synchronized (set) {
// Must be in the synchronized block
Iterator<E> it = set.iterator();
while (it.hasNext()) {
foo(it.next());
}
}
}
<p>or:
{@snippet :
NavigableSet<E> set = synchronizedNavigableSet(new TreeSet<E>());
NavigableSet<E> set2 = set.descendingSet().headSet(foo);
...
synchronized (set) { // Note: set, not set2!!!
// Must be in the synchronized block
Iterator<E> it = set2.descendingIterator();
while (it.hasNext()) {
foo(it.next());
}
}
}
<p>Failure to follow this advice may result in non-deterministic behavior.
<p>The returned navigable set will be serializable if the specified navigable set is
serializable.
<p><b>Java 8+ users and later:</b> Prefer {@link Collections#synchronizedNavigableSet}.
@param navigableSet the navigable set to be "wrapped" in a synchronized navigable set.
@return a synchronized view of the specified navigable set.
@since 13.0
|
java
|
android/guava/src/com/google/common/collect/Sets.java
| 2,046
|
[
"navigableSet"
] | true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
|
when
|
public Member<T> when(Predicate<? super @Nullable T> predicate) {
Assert.notNull(predicate, "'predicate' must not be null");
this.valueExtractor = this.valueExtractor.when(predicate);
return this;
}
|
Only include this member when the given predicate matches.
@param predicate the predicate to test
@return a {@link Member} which may be configured further
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 440
|
[
"predicate"
] | true
| 1
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
infer_objects
|
def infer_objects(self, copy: bool | lib.NoDefault = lib.no_default) -> Self:
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
Parameters
----------
copy : bool, default False
This keyword is now ignored; changing its value will have no
impact on the method.
.. deprecated:: 3.0.0
This keyword is ignored and will be removed in pandas 4.0. Since
pandas 3.0, this method always returns a new object using a lazy
copy mechanism that defers copies until necessary
(Copy-on-Write). See the `user guide on Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
for more details.
Returns
-------
same type as input object
Returns an object of the same type as the input object.
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
self._check_copy_deprecation(copy)
new_mgr = self._mgr.convert()
res = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes)
return res.__finalize__(self, method="infer_objects")
|
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
Parameters
----------
copy : bool, default False
This keyword is now ignored; changing its value will have no
impact on the method.
.. deprecated:: 3.0.0
This keyword is ignored and will be removed in pandas 4.0. Since
pandas 3.0, this method always returns a new object using a lazy
copy mechanism that defers copies until necessary
(Copy-on-Write). See the `user guide on Copy-on-Write
<https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
for more details.
Returns
-------
same type as input object
Returns an object of the same type as the input object.
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
|
python
|
pandas/core/generic.py
| 6,670
|
[
"self",
"copy"
] |
Self
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
decoratorElidingVisitor
|
function decoratorElidingVisitor(node: Node): VisitResult<Node | undefined> {
return isDecorator(node) ? undefined : visitor(node);
}
|
Specialized visitor that visits the immediate children of a class with TypeScript syntax.
@param node The node to visit.
|
typescript
|
src/compiler/transformers/ts.ts
| 617
|
[
"node"
] | true
| 2
| 6.48
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
run_fwd_maybe_bwd
|
def run_fwd_maybe_bwd(
gm: torch.fx.GraphModule,
args: Sequence[Any],
only_fwd: bool = False,
disable_clone: bool = False,
) -> Any:
"""
Runs a forward and possibly backward iteration for a given mod and args.
When disable_clone is True, we will use args as-is without cloning.
This is higher fidelity but we may destroy the args in the process.
"""
from .testing import collect_results, reduce_to_scalar_loss, requires_bwd_pass
gm = copy.deepcopy(gm)
if not disable_clone:
args = clone_inputs_retaining_gradness(args)
if hasattr(gm, "zero_grad"):
gm.zero_grad(True)
# TorchInductor returned callable expects lists. So, may need a boxed calling convention.
out = gm(args) if getattr(gm, "_boxed_call", False) else gm(*args)
if only_fwd:
return out
if requires_bwd_pass(out):
loss = reduce_to_scalar_loss(out)
loss.backward()
return collect_results(gm, out, None, args)
|
Runs a forward and possibly backward iteration for a given mod and args.
When disable_clone is True, we will use args as-is without cloning.
This is higher fidelity but we may destroy the args in the process.
|
python
|
torch/_dynamo/debug_utils.py
| 364
|
[
"gm",
"args",
"only_fwd",
"disable_clone"
] |
Any
| true
| 6
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
wrap
|
function wrap(value, wrapper) {
return partial(castFunction(wrapper), value);
}
|
Creates a function that provides `value` to `wrapper` as its first
argument. Any additional arguments provided to the function are appended
to those provided to the `wrapper`. The wrapper is invoked with the `this`
binding of the created function.
@static
@memberOf _
@since 0.1.0
@category Function
@param {*} value The value to wrap.
@param {Function} [wrapper=identity] The wrapper function.
@returns {Function} Returns the new function.
@example
var p = _.wrap(_.escape, function(func, text) {
return '<p>' + func(text) + '</p>';
});
p('fred, barney, & pebbles');
// => '<p>fred, barney, & pebbles</p>'
|
javascript
|
lodash.js
| 11,063
|
[
"value",
"wrapper"
] | false
| 1
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
poke
|
def poke(self, context: Context):
"""
Check subscribed queue for messages and write them to xcom with the ``messages`` key.
:param context: the context object
:return: ``True`` if message is available or ``False``
"""
message_batch: list[Any] = []
# perform multiple SQS call to retrieve messages in series
for _ in range(self.num_batches):
response = self.poll_sqs(sqs_conn=self.hook.conn)
messages = process_response(
response,
self.message_filtering,
self.message_filtering_match_values,
self.message_filtering_config,
)
if not messages:
continue
message_batch.extend(messages)
if self.delete_message_on_reception:
self.log.info("Deleting %d messages", len(messages))
entries = [
{"Id": message["MessageId"], "ReceiptHandle": message["ReceiptHandle"]}
for message in messages
]
response = self.hook.conn.delete_message_batch(QueueUrl=self.sqs_queue, Entries=entries)
if "Successful" not in response:
raise AirflowException(f"Delete SQS Messages failed {response} for messages {messages}")
if message_batch:
context["ti"].xcom_push(key="messages", value=message_batch)
return True
return False
|
Check subscribed queue for messages and write them to xcom with the ``messages`` key.
:param context: the context object
:return: ``True`` if message is available or ``False``
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/sqs.py
| 186
|
[
"self",
"context"
] | true
| 6
| 8.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
map
|
public static MappedByteBuffer map(File file, MapMode mode) throws IOException {
return mapInternal(file, mode, -1);
}
|
Fully maps a file in to memory as per {@link
FileChannel#map(java.nio.channels.FileChannel.MapMode, long, long)} using the requested {@link
MapMode}.
<p>Files are mapped from offset 0 to its length.
<p>This only works for files ≤ {@link Integer#MAX_VALUE} bytes.
@param file the file to map
@param mode the mode to use when mapping {@code file}
@return a buffer reflecting {@code file}
@throws FileNotFoundException if the {@code file} does not exist
@throws IOException if an I/O error occurs
@see FileChannel#map(MapMode, long, long)
@since 2.0
|
java
|
android/guava/src/com/google/common/io/Files.java
| 667
|
[
"file",
"mode"
] |
MappedByteBuffer
| true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
get
|
static ConfigurationPropertyCaching get(Iterable<ConfigurationPropertySource> sources,
@Nullable Object underlyingSource) {
Assert.notNull(sources, "'sources' must not be null");
if (underlyingSource == null) {
return new ConfigurationPropertySourcesCaching(sources);
}
for (ConfigurationPropertySource source : sources) {
if (source.getUnderlyingSource() == underlyingSource) {
ConfigurationPropertyCaching caching = CachingConfigurationPropertySource.find(source);
if (caching != null) {
return caching;
}
}
}
throw new IllegalStateException("Unable to find cache from configuration property sources");
}
|
Get for a specific configuration property source in the specified configuration
property sources.
@param sources the configuration property sources
@param underlyingSource the
{@link ConfigurationPropertySource#getUnderlyingSource() underlying source} that
must match
@return a caching instance that controls the matching source
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyCaching.java
| 104
|
[
"sources",
"underlyingSource"
] |
ConfigurationPropertyCaching
| true
| 4
| 7.12
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
maybeClose
|
private static void maybeClose(Object object, String name) {
if (object instanceof AutoCloseable) {
Utils.closeQuietly((AutoCloseable) object, name);
}
}
|
Get a list of configured instances of the given class specified by the given configuration key. The configuration
may specify either null or an empty string to indicate no configured instances. In both cases, this method
returns an empty list to indicate no configured instances.
@param classNames The list of class names of the instances to create
@param t The interface the class should implement
@param configOverrides Configuration overrides to use.
@return The list of configured instances
|
java
|
clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
| 509
|
[
"object",
"name"
] |
void
| true
| 2
| 7.92
|
apache/kafka
| 31,560
|
javadoc
| false
|
dateFormat
|
public DateTimeFormatters dateFormat(@Nullable String pattern) {
if (isIso(pattern)) {
this.dateFormatter = DateTimeFormatter.ISO_LOCAL_DATE;
this.datePattern = "yyyy-MM-dd";
}
else {
this.dateFormatter = formatter(pattern);
this.datePattern = pattern;
}
return this;
}
|
Configures the date format using the given {@code pattern}.
@param pattern the pattern for formatting dates
@return {@code this} for chained method invocation
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/web/format/DateTimeFormatters.java
| 48
|
[
"pattern"
] |
DateTimeFormatters
| true
| 2
| 8.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
matrix_transpose
|
def matrix_transpose(x, /):
"""
Transposes a matrix (or a stack of matrices) ``x``.
This function is Array API compatible.
Parameters
----------
x : array_like
Input array having shape (..., M, N) and whose two innermost
dimensions form ``MxN`` matrices.
Returns
-------
out : ndarray
An array containing the transpose for each matrix and having shape
(..., N, M).
See Also
--------
transpose : Generic transpose method.
Examples
--------
>>> import numpy as np
>>> np.matrix_transpose([[1, 2], [3, 4]])
array([[1, 3],
[2, 4]])
>>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
array([[[1, 3],
[2, 4]],
[[5, 7],
[6, 8]]])
"""
x = asanyarray(x)
if x.ndim < 2:
raise ValueError(
f"Input array must be at least 2-dimensional, but it is {x.ndim}"
)
return swapaxes(x, -1, -2)
|
Transposes a matrix (or a stack of matrices) ``x``.
This function is Array API compatible.
Parameters
----------
x : array_like
Input array having shape (..., M, N) and whose two innermost
dimensions form ``MxN`` matrices.
Returns
-------
out : ndarray
An array containing the transpose for each matrix and having shape
(..., N, M).
See Also
--------
transpose : Generic transpose method.
Examples
--------
>>> import numpy as np
>>> np.matrix_transpose([[1, 2], [3, 4]])
array([[1, 3],
[2, 4]])
>>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
array([[[1, 3],
[2, 4]],
[[5, 7],
[6, 8]]])
|
python
|
numpy/_core/fromnumeric.py
| 684
|
[
"x"
] | false
| 2
| 7.68
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
getDependentBeans
|
public String[] getDependentBeans(String beanName) {
Set<String> dependentBeans = this.dependentBeanMap.get(beanName);
if (dependentBeans == null) {
return new String[0];
}
synchronized (this.dependentBeanMap) {
return StringUtils.toStringArray(dependentBeans);
}
}
|
Return the names of all beans which depend on the specified bean, if any.
@param beanName the name of the bean
@return the array of dependent bean names, or an empty array if none
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultSingletonBeanRegistry.java
| 667
|
[
"beanName"
] | true
| 2
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
increment
|
public CheckIntervalData increment(final int delta) {
return delta == 0 ? this : new CheckIntervalData(getEventCount() + delta,
getCheckIntervalStart());
}
|
Returns a new instance of {@link CheckIntervalData} with the event counter
incremented by the given delta. If the delta is 0, this object is returned.
@param delta the delta
@return the updated instance
|
java
|
src/main/java/org/apache/commons/lang3/concurrent/EventCountCircuitBreaker.java
| 230
|
[
"delta"
] |
CheckIntervalData
| true
| 2
| 7.68
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
JAVA_CTRL_CHARS_UNESCAPE
|
public static String[][] JAVA_CTRL_CHARS_UNESCAPE() {
return JAVA_CTRL_CHARS_UNESCAPE.clone();
}
|
Reverse of {@link #JAVA_CTRL_CHARS_ESCAPE()} for unescaping purposes.
@return the mapping table.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/EntityArrays.java
| 464
|
[] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
ofExisting
|
static ConfigDataEnvironmentContributor ofExisting(PropertySource<?> propertySource,
ConversionService conversionService) {
return new ConfigDataEnvironmentContributor(Kind.EXISTING, null, null, false, propertySource,
asConfigurationPropertySource(propertySource), null, null, null, conversionService);
}
|
Factory method to create a contributor that wraps an {@link Kind#EXISTING existing}
property source. The contributor provides access to existing properties, but
doesn't actively import any additional contributors.
@param propertySource the property source to wrap
@param conversionService the conversion service to use
@return a new {@link ConfigDataEnvironmentContributor} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributor.java
| 424
|
[
"propertySource",
"conversionService"
] |
ConfigDataEnvironmentContributor
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
worker_direct
|
def worker_direct(hostname: str | Queue) -> Queue:
"""Return the :class:`kombu.Queue` being a direct route to a worker.
Arguments:
hostname (str, ~kombu.Queue): The fully qualified node name of
a worker (e.g., ``[email protected]``). If passed a
:class:`kombu.Queue` instance it will simply return
that instead.
"""
if isinstance(hostname, Queue):
return hostname
return Queue(
WORKER_DIRECT_QUEUE_FORMAT.format(hostname=hostname),
WORKER_DIRECT_EXCHANGE,
hostname,
)
|
Return the :class:`kombu.Queue` being a direct route to a worker.
Arguments:
hostname (str, ~kombu.Queue): The fully qualified node name of
a worker (e.g., ``[email protected]``). If passed a
:class:`kombu.Queue` instance it will simply return
that instead.
|
python
|
celery/utils/nodenames.py
| 38
|
[
"hostname"
] |
Queue
| true
| 2
| 8.16
|
celery/celery
| 27,741
|
google
| false
|
shouldInject
|
protected boolean shouldInject(@Nullable PropertyValues pvs) {
if (this.isField) {
return true;
}
return !checkPropertySkipping(pvs);
}
|
Whether the property values should be injected.
@param pvs property values to check
@return whether the property values should be injected
@since 6.0.10
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/annotation/InjectionMetadata.java
| 251
|
[
"pvs"
] | true
| 2
| 8.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
StringBuilder bld;
if (metadata.isFull()) {
bld = new StringBuilder("FullFetchRequest(toSend=(");
String prefix = "";
for (TopicPartition partition : toSend.keySet()) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
} else {
bld = new StringBuilder("IncrementalFetchRequest(toSend=(");
String prefix = "";
for (TopicPartition partition : toSend.keySet()) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
bld.append("), toForget=(");
prefix = "";
for (TopicIdPartition partition : toForget) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
bld.append("), toReplace=(");
prefix = "";
for (TopicIdPartition partition : toReplace) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
bld.append("), implied=(");
prefix = "";
for (TopicPartition partition : sessionPartitions.keySet()) {
if (!toSend.containsKey(partition)) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
}
}
if (canUseTopicIds) {
bld.append("), canUseTopicIds=True");
} else {
bld.append("), canUseTopicIds=False");
}
bld.append(")");
return bld.toString();
}
|
Get the full set of partitions involved in this fetch request.
|
java
|
clients/src/main/java/org/apache/kafka/clients/FetchSessionHandler.java
| 178
|
[] |
String
| true
| 4
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
rhoInit
|
Ptr<RHO_HEST> rhoInit(void){
/* Select an optimized implementation of RHO here. */
#if 1
/**
* For now, only the generic C implementation is available. In the future,
* SSE2/AVX/AVX2/FMA/NEON versions may be added, and they will be selected
* depending on cv::checkHardwareSupport()'s return values.
*/
Ptr<RHO_HEST> p = Ptr<RHO_HEST>(new RHO_HEST_REFC);
#endif
/* Initialize it. */
if(p){
if(!p->initialize()){
p.release();
}
}
/* Return it. */
return p;
}
|
External access to context constructor.
@return A pointer to the context if successful; NULL if an error occurred.
|
cpp
|
modules/calib3d/src/rho.cpp
| 446
|
[] | true
| 3
| 8.24
|
opencv/opencv
| 85,374
|
doxygen
| false
|
|
resolve
|
@SuppressWarnings("unchecked")
public <T> @Nullable T resolve(RegisteredBean registeredBean) {
return (T) resolveObject(registeredBean);
}
|
Resolve the field value for the specified registered bean.
@param registeredBean the registered bean
@return the resolved field value
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredFieldValueResolver.java
| 137
|
[
"registeredBean"
] |
T
| true
| 1
| 6.16
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_pre_import_airflow_modules
|
def _pre_import_airflow_modules(file_path: str, log: FilteringBoundLogger) -> None:
"""
Pre-import Airflow modules found in the given file.
This prevents modules from being re-imported in each processing process,
saving CPU time and memory.
(The default value of "parsing_pre_import_modules" is set to True)
:param file_path: Path to the file to scan for imports
:param log: Logger instance to use for warnings
"""
if not conf.getboolean("dag_processor", "parsing_pre_import_modules", fallback=True):
return
for module in iter_airflow_imports(file_path):
try:
importlib.import_module(module)
except Exception as e:
log.warning("Error when trying to pre-import module '%s' found in %s: %s", module, file_path, e)
|
Pre-import Airflow modules found in the given file.
This prevents modules from being re-imported in each processing process,
saving CPU time and memory.
(The default value of "parsing_pre_import_modules" is set to True)
:param file_path: Path to the file to scan for imports
:param log: Logger instance to use for warnings
|
python
|
airflow-core/src/airflow/dag_processing/processor.py
| 155
|
[
"file_path",
"log"
] |
None
| true
| 3
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
wsgi_app
|
def wsgi_app(
    self, environ: WSGIEnvironment, start_response: StartResponse
) -> cabc.Iterable[bytes]:
    """The actual WSGI application. This is not implemented in
    :meth:`__call__` so that middlewares can be applied without
    losing a reference to the app object. Instead of doing this::

        app = MyMiddleware(app)

    It's a better idea to do this instead::

        app.wsgi_app = MyMiddleware(app.wsgi_app)

    Then you still have the original application object around and
    can continue to call methods on it.

    .. versionchanged:: 0.7
        Teardown events for the request and app contexts are called
        even if an unhandled error occurs. Other events may not be
        called depending on when an error occurs during dispatch.

    :param environ: A WSGI environment.
    :param start_response: A callable accepting a status code,
        a list of headers, and an optional exception context to
        start the response.
    """
    ctx = self.request_context(environ)
    error: BaseException | None = None
    try:
        try:
            ctx.push()
            response = self.full_dispatch_request(ctx)
        except Exception as e:
            # Ordinary exceptions are converted into an error response by the
            # app's registered handlers; the error is kept for teardown below.
            error = e
            response = self.handle_exception(ctx, e)
        except:  # noqa: B001
            # Non-Exception BaseExceptions (e.g. SystemExit) are recorded so
            # teardown still sees them, then re-raised unchanged.
            error = sys.exc_info()[1]
            raise
        return response(environ, start_response)
    finally:
        if "werkzeug.debug.preserve_context" in environ:
            # The werkzeug debugger asks to keep the context alive so the
            # interactive console can inspect it after the request ends.
            environ["werkzeug.debug.preserve_context"](ctx)
        if error is not None and self.should_ignore_error(error):
            # Errors the app chooses to ignore must not reach teardown callbacks.
            error = None
        ctx.pop(error)
|
The actual WSGI application. This is not implemented in
:meth:`__call__` so that middlewares can be applied without
losing a reference to the app object. Instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
Teardown events for the request and app contexts are called
even if an unhandled error occurs. Other events may not be
called depending on when an error occurs during dispatch.
:param environ: A WSGI environment.
:param start_response: A callable accepting a status code,
a list of headers, and an optional exception context to
start the response.
|
python
|
src/flask/app.py
| 1,536
|
[
"self",
"environ",
"start_response"
] |
cabc.Iterable[bytes]
| true
| 4
| 6.72
|
pallets/flask
| 70,946
|
sphinx
| false
|
isKey
|
/**
 * Checks if `value` is a property name and not a property path.
 *
 * @private
 * @param {*} value The value to check.
 * @param {Object} [object] The object to query keys on.
 * @returns {boolean} Returns `true` if `value` is a property name, else `false`.
 */
function isKey(value, object) {
  // Arrays are always treated as paths, never as single keys.
  if (isArray(value)) {
    return false;
  }
  var type = typeof value;
  // Non-string primitives (and null/undefined) can only ever be plain keys.
  var isPrimitiveKey = type == 'number' || type == 'symbol' ||
      type == 'boolean' || value == null || isSymbol(value);
  if (isPrimitiveKey) {
    return true;
  }
  // Otherwise: a plain identifier, anything that isn't path-like, or a
  // string that happens to be an own/inherited key of `object`.
  return reIsPlainProp.test(value) || !reIsDeepProp.test(value) ||
      (object != null && value in Object(object));
}
|
Checks if `value` is a property name and not a property path.
@private
@param {*} value The value to check.
@param {Object} [object] The object to query keys on.
@returns {boolean} Returns `true` if `value` is a property name, else `false`.
|
javascript
|
lodash.js
| 6,405
|
[
"value",
"object"
] | false
| 10
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
forCondition
|
/**
 * Factory method for a builder to construct a new {@link ConditionMessage}
 * for a condition.
 * @param condition the condition class
 * @param details details of the condition
 * @return a {@link Builder} builder
 */
public static Builder forCondition(Class<? extends Annotation> condition, Object... details) {
    return new ConditionMessage().andCondition(condition, details);
}
|
Factory method for a builder to construct a new {@link ConditionMessage} for a
condition.
@param condition the condition
@param details details of the condition
@return a {@link Builder} builder
@see #forCondition(String, Object...)
@see #andCondition(String, Object...)
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ConditionMessage.java
| 182
|
[
"condition"
] |
Builder
| true
| 1
| 6.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
committed
|
// Returns the committed offsets for the requested partitions. Partitions with
// no committed entry are omitted; partitions that have an entry but are no
// longer assigned report offset 0 instead of their stored offset.
@Override
public synchronized Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
    ensureNotClosed();
    return partitions.stream()
        .filter(committed::containsKey)
        .collect(Collectors.toMap(tp -> tp, tp -> subscriptions.isAssigned(tp) ?
            committed.get(tp) : new OffsetAndMetadata(0)));
}
|
Returns the committed offsets for the given partitions, omitting partitions with no
committed entry and reporting offset 0 for partitions that are no longer assigned.
@param partitions the partitions to look up committed offsets for
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java
| 404
|
[
"partitions"
] | true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
getFixesInfoForNonUMDImport
|
/**
 * Collects candidate auto-import fixes for an unresolved identifier that is
 * not a UMD global: for each importable symbol name at the error location,
 * gathers matching export infos across the program (optionally including the
 * auto-import provider) and converts them into concrete import fixes.
 */
function getFixesInfoForNonUMDImport({ sourceFile, program, cancellationToken, host, preferences }: CodeFixContextBase, symbolToken: Identifier, useAutoImportProvider: boolean): readonly (FixInfo & { fix: ImportFixWithModuleSpecifier; })[] | undefined {
    const checker = program.getTypeChecker();
    const compilerOptions = program.getCompilerOptions();
    return flatMap(getSymbolNamesToImport(sourceFile, checker, symbolToken, compilerOptions), symbolName => {
        // "default" is a keyword and not a legal identifier for the import, but appears as an identifier.
        if (symbolName === InternalSymbolName.Default) {
            return undefined;
        }
        const isValidTypeOnlyUseSite = isValidTypeOnlyAliasUseSite(symbolToken);
        const useRequire = shouldUseRequire(sourceFile, program);
        const exportInfo = getExportInfos(symbolName, isJSXTagName(symbolToken), getMeaningFromLocation(symbolToken), cancellationToken, sourceFile, program, useAutoImportProvider, host, preferences);
        return arrayFrom(
            flatMapIterator(exportInfo.values(), exportInfos => getImportFixes(exportInfos, symbolToken.getStart(sourceFile), isValidTypeOnlyUseSite, useRequire, program, sourceFile, host, preferences).fixes),
            // isJsxNamespaceFix marks fixes whose imported name differs from the identifier at the error site.
            fix => ({ fix, symbolName, errorIdentifierText: symbolToken.text, isJsxNamespaceFix: symbolName !== symbolToken.text }),
        );
    });
}
|
Collects candidate auto-import fixes for an unresolved identifier that is not a
UMD global, gathering matching exports across the program and auto-import provider.
@internal
|
typescript
|
src/services/codefixes/importFixes.ts
| 1,570
|
[
"{ sourceFile, program, cancellationToken, host, preferences }",
"symbolToken",
"useAutoImportProvider"
] | true
| 2
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
extractMessage
|
/**
 * Extract the {@code "message"} field from a JSON HTTP entity, if present.
 * @param entity the HTTP entity to inspect (may be {@code null})
 * @return the message text, or {@code null} if the entity is missing, is not
 * valid JSON, or has no {@code "message"} field
 */
private @Nullable String extractMessage(@Nullable HttpEntity entity) {
    if (entity != null) {
        try {
            JSONObject error = getContentAsJson(entity);
            if (error.has("message")) {
                return error.getString("message");
            }
        }
        catch (Exception ex) {
            // Ignore: malformed or non-JSON bodies simply yield no message.
        }
    }
    return null;
}
|
Extracts the "message" field from a JSON HTTP entity, if present.
@param entity the HTTP entity (may be null)
@return the message text, or null if unavailable
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/init/InitializrService.java
| 211
|
[
"entity"
] |
String
| true
| 4
| 8.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
get_previous_ti
|
def get_previous_ti(
    self,
    state: DagRunState | None = None,
    session: Session = NEW_SESSION,
) -> TaskInstance | None:
    """
    Return the task instance for the task that ran before this task instance.

    :param session: SQLAlchemy ORM Session
    :param state: If passed, it only take into account instances of a specific state.
    """
    previous_run = self.get_previous_dagrun(state, session=session)
    # No earlier dag run means there is no earlier task instance either.
    return (
        None
        if previous_run is None
        else previous_run.get_task_instance(self.task_id, session=session)
    )
|
Return the task instance for the task that ran before this task instance.
:param session: SQLAlchemy ORM Session
:param state: If passed, it only take into account instances of a specific state.
|
python
|
airflow-core/src/airflow/models/taskinstance.py
| 866
|
[
"self",
"state",
"session"
] |
TaskInstance | None
| true
| 2
| 7.04
|
apache/airflow
| 43,597
|
sphinx
| false
|
computeReplacement
|
/**
 * Returns the string the given escaper would substitute for the specified
 * code point, or {@code null} if no replacement should be made.
 * @param escaper the escaper to query
 * @param cp the Unicode code point to escape if necessary
 * @return the replacement string, or {@code null} if no escaping was needed
 */
public static @Nullable String computeReplacement(UnicodeEscaper escaper, int cp) {
    return stringOrNull(escaper.escape(cp));
}
|
Returns a string that would replace the given character in the specified escaper, or {@code
null} if no replacement should be made. This method is intended for use in tests through the
{@code EscaperAsserts} class; production users of {@link UnicodeEscaper} should limit
themselves to its public interface.
@param cp the Unicode code point to escape if necessary
@return the replacement string, or {@code null} if no escaping was needed
|
java
|
android/guava/src/com/google/common/escape/Escapers.java
| 184
|
[
"escaper",
"cp"
] |
String
| true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
updateIndexSetting
|
/**
 * Issues a settings-update request for exactly one index and reports the
 * outcome to the listener. An {@link IndexNotFoundException} is treated as
 * success (the index was already deleted), clearing any recorded error.
 * @param projectId the project owning the target index
 * @param updateSettingsRequest the request; must target exactly one index
 * @param listener notified with {@code null} on success, or the failure
 */
private void updateIndexSetting(ProjectId projectId, UpdateSettingsRequest updateSettingsRequest, ActionListener<Void> listener) {
    assert updateSettingsRequest.indices() != null && updateSettingsRequest.indices().length == 1
        : "Data stream lifecycle service updates the settings for one index at a time";
    // "saving" the index name here so we don't capture the entire request
    String targetIndex = updateSettingsRequest.indices()[0];
    logger.trace(
        "Data stream lifecycle service issues request to update settings [{}] for index [{}]",
        updateSettingsRequest.settings().keySet(),
        targetIndex
    );
    client.projectClient(projectId).admin().indices().updateSettings(updateSettingsRequest, new ActionListener<>() {
        @Override
        public void onResponse(AcknowledgedResponse acknowledgedResponse) {
            // Fixed duplicated word in log message ("for index index").
            logger.info(
                "Data stream lifecycle service successfully updated settings [{}] for index [{}]",
                updateSettingsRequest.settings().keySet(),
                targetIndex
            );
            listener.onResponse(null);
        }

        @Override
        public void onFailure(Exception e) {
            if (e instanceof IndexNotFoundException) {
                // index was already deleted, treat this as a success
                logger.trace("Clearing recorded error for index [{}] because the index was deleted", targetIndex);
                errorStore.clearRecordedError(projectId, targetIndex);
                listener.onResponse(null);
                return;
            }
            listener.onFailure(e);
        }
    });
}
|
Issues a request to update the settings of a single index, treating a missing
index as success and clearing any recorded lifecycle error for it.
@param projectId the project that owns the target index
@param updateSettingsRequest the settings update request (must target exactly one index)
@param listener notified with null on success, or with the failure otherwise
|
java
|
modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java
| 1,146
|
[
"projectId",
"updateSettingsRequest",
"listener"
] |
void
| true
| 3
| 7.92
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
next
|
/**
 * Returns the next {@code length} characters of the input, advancing the
 * current position past them.
 * @param length the number of characters to consume
 * @return the consumed substring
 * @throws JSONException if fewer than {@code length} characters remain
 */
public String next(int length) throws JSONException {
    if (this.pos + length > this.in.length()) {
        throw syntaxError(length + " is out of bounds");
    }
    String result = this.in.substring(this.pos, this.pos + length);
    this.pos += length;
    return result;
}
|
Returns the next {@code length} characters of the input, advancing the current position.
@return the consumed substring.
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
| 498
|
[
"length"
] |
String
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
_raise_or_return
|
def _raise_or_return():
    """Depending on the value of raise_unknown, either raise an error or return
    'unknown'.

    Closure over ``raise_unknown``, ``input_name`` and ``y`` from the
    enclosing scope.

    :raises ValueError: if ``raise_unknown`` is truthy.
    """
    if raise_unknown:
        # Renamed from `input` to avoid shadowing the builtin.
        input_label = input_name if input_name else "data"
        raise ValueError(f"Unknown label type for {input_label}: {y!r}")
    # No `else` needed: the raise above ends the other branch.
    return "unknown"
|
Depending on the value of raise_unknown, either raise an error or return
'unknown'.
|
python
|
sklearn/utils/multiclass.py
| 319
|
[] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
unknown
| false
|
|
round
|
def round(self, decimals: int = 0) -> Self | Index:  # type: ignore[override]
    """
    Round each value in the Index to the given number of decimals.

    Parameters
    ----------
    decimals : int, optional
        Number of decimal places to round to. If decimals is negative,
        it specifies the number of positions to the left of the decimal point
        e.g. ``round(11.0, -1) == 10.0``.

    Returns
    -------
    Index or RangeIndex
        A new Index with the rounded values.

    Examples
    --------
    >>> import pandas as pd
    >>> idx = pd.RangeIndex(10, 30, 10)
    >>> idx.round(decimals=-1)
    RangeIndex(start=10, stop=30, step=10)
    >>> idx = pd.RangeIndex(10, 15, 1)
    >>> idx.round(decimals=-1)
    Index([10, 10, 10, 10, 10], dtype='int64')
    """
    if decimals >= 0:
        # Integers are never changed by rounding to >= 0 decimal places.
        return self.copy()
    elif self.start % 10**-decimals == 0 and self.step % 10**-decimals == 0:
        # e.g. RangeIndex(10, 30, 10).round(-1) doesn't need rounding
        return self.copy()
    else:
        # Values actually change, so fall back to a materialized Index.
        return super().round(decimals=decimals)
|
Round each value in the Index to the given number of decimals.
Parameters
----------
decimals : int, optional
Number of decimal places to round to. If decimals is negative,
it specifies the number of positions to the left of the decimal point
e.g. ``round(11.0, -1) == 10.0``.
Returns
-------
Index or RangeIndex
A new Index with the rounded values.
Examples
--------
>>> import pandas as pd
>>> idx = pd.RangeIndex(10, 30, 10)
>>> idx.round(decimals=-1)
RangeIndex(start=10, stop=30, step=10)
>>> idx = pd.RangeIndex(10, 15, 1)
>>> idx.round(decimals=-1)
Index([10, 10, 10, 10, 10], dtype='int64')
|
python
|
pandas/core/indexes/range.py
| 1,349
|
[
"self",
"decimals"
] |
Self | Index
| true
| 5
| 8.48
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
isDependent
|
/**
 * Determine whether the specified dependent bean has been registered as
 * dependent on the given bean or on any of its transitive dependencies.
 * @param beanName the name of the bean to check
 * @param dependentBeanName the name of the dependent bean
 * @param alreadySeen bean names already visited on this path (cycle guard)
 */
private boolean isDependent(String beanName, String dependentBeanName, @Nullable Set<String> alreadySeen) {
    // Cycle guard: never revisit a bean already inspected on this path.
    if (alreadySeen != null && alreadySeen.contains(beanName)) {
        return false;
    }
    // Resolve any alias to the canonical bean name before the map lookup.
    String canonicalName = canonicalName(beanName);
    Set<String> dependentBeans = this.dependentBeanMap.get(canonicalName);
    if (dependentBeans == null || dependentBeans.isEmpty()) {
        return false;
    }
    // Direct dependency found.
    if (dependentBeans.contains(dependentBeanName)) {
        return true;
    }
    // Lazily allocate the visited set only when recursion is needed.
    if (alreadySeen == null) {
        alreadySeen = new HashSet<>();
    }
    alreadySeen.add(beanName);
    // Depth-first search through transitive dependencies.
    for (String transitiveDependency : dependentBeans) {
        if (isDependent(transitiveDependency, dependentBeanName, alreadySeen)) {
            return true;
        }
    }
    return false;
}
|
Determine whether the specified dependent bean has been registered as
dependent on the given bean or on any of its transitive dependencies.
@param beanName the name of the bean to check
@param dependentBeanName the name of the dependent bean
@since 4.0
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultSingletonBeanRegistry.java
| 630
|
[
"beanName",
"dependentBeanName",
"alreadySeen"
] | true
| 8
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
baseKeys
|
/**
 * The base implementation of `_.keys` which doesn't treat sparse arrays as dense.
 *
 * @private
 * @param {Object} object The object to query.
 * @returns {Array} Returns the array of property names.
 */
function baseKeys(object) {
  // Non-prototype objects can use the fast native Object.keys path.
  if (!isPrototype(object)) {
    return nativeKeys(object);
  }
  var result = [];
  // Prototype objects are enumerated manually so their own 'constructor'
  // property is skipped.
  for (var key in Object(object)) {
    if (hasOwnProperty.call(object, key) && key != 'constructor') {
      result.push(key);
    }
  }
  return result;
}
|
The base implementation of `_.keys` which doesn't treat sparse arrays as dense.
@private
@param {Object} object The object to query.
@returns {Array} Returns the array of property names.
|
javascript
|
lodash.js
| 3,524
|
[
"object"
] | false
| 4
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
prepareMethodOverride
|
/**
 * Validate and prepare the given method override: fail if no method with the
 * configured name exists on the bean class, and mark the override as
 * non-overloaded when exactly one match is found.
 * @param mo the MethodOverride object to validate
 * @throws BeanDefinitionValidationException in case of validation failure
 */
protected void prepareMethodOverride(MethodOverride mo) throws BeanDefinitionValidationException {
    int count = ClassUtils.getMethodCountForName(getBeanClass(), mo.getMethodName());
    if (count == 0) {
        throw new BeanDefinitionValidationException(
                "Invalid method override: no method with name '" + mo.getMethodName() +
                "' on class [" + getBeanClassName() + "]");
    }
    else if (count == 1) {
        // Mark override as not overloaded, to avoid the overhead of arg type checking.
        mo.setOverloaded(false);
    }
}
|
Validate and prepare the given method override.
Checks for existence of a method with the specified name,
marking it as not overloaded if none found.
@param mo the MethodOverride object to validate
@throws BeanDefinitionValidationException in case of validation failure
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanDefinition.java
| 1,265
|
[
"mo"
] |
void
| true
| 3
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
setThreadFactory
|
/**
 * Sets the backing {@link ThreadFactory} that thread creation is delegated to.
 * @param backingThreadFactory the backing factory (must not be {@code null})
 * @return this builder, for chaining
 */
@CanIgnoreReturnValue
public ThreadFactoryBuilder setThreadFactory(ThreadFactory backingThreadFactory) {
    this.backingThreadFactory = checkNotNull(backingThreadFactory);
    return this;
}
|
Sets the backing {@link ThreadFactory} for new threads created with this ThreadFactory. Threads
will be created by invoking #newThread(Runnable) on this backing {@link ThreadFactory}.
@param backingThreadFactory the backing {@link ThreadFactory} which will be delegated to during
thread creation.
@return this for the builder pattern
@see MoreExecutors
|
java
|
android/guava/src/com/google/common/util/concurrent/ThreadFactoryBuilder.java
| 164
|
[
"backingThreadFactory"
] |
ThreadFactoryBuilder
| true
| 1
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
resolveAutowiredArgument
|
/**
 * Resolve a single autowired argument for the specified registered bean.
 * @param registeredBean the registered bean
 * @param descriptor descriptor of the dependency to resolve
 * @param argumentValue an explicitly configured value, if any (takes precedence)
 * @param autowiredBeanNames collects the names of beans used for autowiring
 * @return the resolved argument value
 */
private @Nullable Object resolveAutowiredArgument(RegisteredBean registeredBean, DependencyDescriptor descriptor,
        @Nullable ValueHolder argumentValue, Set<String> autowiredBeanNames) {

    TypeConverter typeConverter = registeredBean.getBeanFactory().getTypeConverter();
    if (argumentValue != null) {
        // An explicit value wins: use it as-is when already converted,
        // otherwise convert it to the declared dependency type.
        return (argumentValue.isConverted() ? argumentValue.getConvertedValue() :
                typeConverter.convertIfNecessary(argumentValue.getValue(),
                        descriptor.getDependencyType(), descriptor.getMethodParameter()));
    }
    try {
        return registeredBean.resolveAutowiredArgument(descriptor, typeConverter, autowiredBeanNames);
    }
    catch (BeansException ex) {
        // Surface resolution failures with the dependency context attached.
        throw new UnsatisfiedDependencyException(null, registeredBean.getBeanName(), descriptor, ex);
    }
}
|
Resolve a single autowired argument for the specified registered bean.
@param registeredBean the registered bean
@return the resolved argument value
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanInstanceSupplier.java
| 328
|
[
"registeredBean",
"descriptor",
"argumentValue",
"autowiredBeanNames"
] |
Object
| true
| 4
| 7.28
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fastIpv6ToBufferUnsafe
|
// Formats an IPv6 address as 8 colon-separated 16-bit hex groups (no zero
// compression, no trailing NUL written). "Unsafe": the caller must supply a
// buffer large enough for the longest form (8*4 digits + 7 colons = 39 chars).
// Returns the number of characters written.
inline size_t fastIpv6ToBufferUnsafe(const in6_addr& in6Addr, char* str) {
#ifdef _MSC_VER
  // MSVC exposes the 16-bit groups under a different union member name.
  const uint16_t* bytes = reinterpret_cast<const uint16_t*>(&in6Addr.u.Word);
#else
  const uint16_t* bytes = reinterpret_cast<const uint16_t*>(&in6Addr.s6_addr16);
#endif
  char* buf = str;
  for (int i = 0; i < 8; ++i) {
    // htons converts each group from network byte order to a host-order
    // value so the hex digits come out in the conventional order.
    writeIntegerString<
        uint16_t,
        4, // at most 4 hex digits per ushort
        16, // base 16 (hex)
        true>(htons(bytes[i]), &buf);
    if (i != 7) {
      *(buf++) = ':';
    }
  }
  return buf - str;
}
|
Helper for working with unsigned char* or uint8_t* ByteArray values
|
cpp
|
folly/detail/IPAddressSource.h
| 246
|
[] | true
| 3
| 6.4
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
flat
|
/**
 * Add pairs using flat naming.
 * @param <T> the item type
 * @param joiner the function used to join the prefix and name
 * @param pairs callback to add all the pairs
 * @return a {@link BiConsumer} for use with the {@link JsonWriter}
 */
public <T> BiConsumer<T, BiConsumer<String, Object>> flat(Joiner joiner, Consumer<Pairs<T>> pairs) {
    // When excluded, return a no-op consumer; otherwise delegate to Pairs::flat.
    return (!this.include) ? none() : new Pairs<>(joiner, pairs)::flat;
}
|
Add pairs using flat naming.
@param <T> the item type
@param joiner the function used to join the prefix and name
@param pairs callback to add all the pairs
@return a {@link BiConsumer} for use with the {@link JsonWriter}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/ContextPairs.java
| 70
|
[
"joiner",
"pairs"
] | true
| 2
| 7.84
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
mean_tweedie_deviance
|
def mean_tweedie_deviance(y_true, y_pred, *, sample_weight=None, power=0):
    """Mean Tweedie deviance regression loss.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    power : float, default=0
        Tweedie power parameter. Either power <= 0 or power >= 1.

        The higher `p` the less weight is given to extreme
        deviations between true and predicted targets.

        - power < 0: Extreme stable distribution. Requires: y_pred > 0.
        - power = 0 : Normal distribution, output corresponds to
          mean_squared_error. y_true and y_pred can be any real numbers.
        - power = 1 : Poisson distribution. Requires: y_true >= 0 and
          y_pred > 0.
        - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
          and y_pred > 0.
        - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
        - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
          and y_pred > 0.
        - otherwise : Positive stable distribution. Requires: y_true > 0
          and y_pred > 0.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_tweedie_deviance
    >>> y_true = [2, 0, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_tweedie_deviance(y_true, y_pred, power=1)
    1.4260...
    """
    xp, _ = get_namespace(y_true, y_pred)
    y_type, y_true, y_pred, sample_weight, _ = _check_reg_targets_with_floating_dtype(
        y_true, y_pred, sample_weight, multioutput=None, xp=xp
    )
    if y_type == "continuous-multioutput":
        raise ValueError("Multioutput not supported in mean_tweedie_deviance")
    if sample_weight is not None:
        # Reshape to a column so weights broadcast against the targets.
        sample_weight = column_or_1d(sample_weight)
        sample_weight = sample_weight[:, np.newaxis]
    # Domain validation: each power regime constrains the sign of y/y_pred.
    message = f"Mean Tweedie deviance error with power={power} can only be used on "
    if power < 0:
        # 'Extreme stable', y any real number, y_pred > 0
        if xp.any(y_pred <= 0):
            raise ValueError(message + "strictly positive y_pred.")
    elif power == 0:
        # Normal, y and y_pred can be any real number
        pass
    elif 1 <= power < 2:
        # Poisson and compound Poisson distribution, y >= 0, y_pred > 0
        if xp.any(y_true < 0) or xp.any(y_pred <= 0):
            raise ValueError(message + "non-negative y and strictly positive y_pred.")
    elif power >= 2:
        # Gamma and Extreme stable distribution, y and y_pred > 0
        if xp.any(y_true <= 0) or xp.any(y_pred <= 0):
            raise ValueError(message + "strictly positive y and y_pred.")
    else:  # pragma: nocover
        # Unreachable statement
        raise ValueError
    return _mean_tweedie_deviance(
        y_true, y_pred, sample_weight=sample_weight, power=power
    )
|
Mean Tweedie deviance regression loss.
Read more in the :ref:`User Guide <mean_tweedie_deviance>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
power : float, default=0
Tweedie power parameter. Either power <= 0 or power >= 1.
The higher `p` the less weight is given to extreme
deviations between true and predicted targets.
- power < 0: Extreme stable distribution. Requires: y_pred > 0.
- power = 0 : Normal distribution, output corresponds to
mean_squared_error. y_true and y_pred can be any real numbers.
- power = 1 : Poisson distribution. Requires: y_true >= 0 and
y_pred > 0.
- 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
and y_pred > 0.
- power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
- power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
and y_pred > 0.
- otherwise : Positive stable distribution. Requires: y_true > 0
and y_pred > 0.
Returns
-------
loss : float
A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_tweedie_deviance
>>> y_true = [2, 0, 1, 4]
>>> y_pred = [0.5, 0.5, 2., 2.]
>>> mean_tweedie_deviance(y_true, y_pred, power=1)
1.4260...
|
python
|
sklearn/metrics/_regression.py
| 1,402
|
[
"y_true",
"y_pred",
"sample_weight",
"power"
] | false
| 13
| 7.6
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
_get_single_internal
|
def _get_single_internal(self, index):
    """
    Return a pointer to the ring at the given index, for internal use only.

    Index 0 always selects the exterior ring; any higher index selects the
    corresponding interior ring (poly[1] is the first interior ring,
    poly[2] the second, and so on).

    CAREFUL: Internal/External are not the same as Interior/Exterior!
    Unlike _get_single_external(), which returns a clone for use by outside
    code, this returns a pointer into the existing geometry and must only
    be used by this object's own methods.
    """
    if index == 0:
        return capi.get_extring(self.ptr)
    # Interior rings are 0-based in the C API, hence the offset of one.
    return capi.get_intring(self.ptr, index - 1)
|
Return the ring at the specified index. The first index, 0, will
always return the exterior ring. Indices > 0 will return the
interior ring at the given index (e.g., poly[1] and poly[2] would
return the first and second interior ring, respectively).
CAREFUL: Internal/External are not the same as Interior/Exterior!
Return a pointer from the existing geometries for use internally by the
object's methods. _get_single_external() returns a clone of the same
geometry for use by external code.
|
python
|
django/contrib/gis/geos/polygon.py
| 127
|
[
"self",
"index"
] | false
| 3
| 6.4
|
django/django
| 86,204
|
unknown
| false
|
|
_unique
|
def _unique(values, *, return_inverse=False, return_counts=False):
    """Helper function to find unique values with support for python objects.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Parameters
    ----------
    values : ndarray
        Values to check for unknowns.

    return_inverse : bool, default=False
        If True, also return the indices of the unique values.

    return_counts : bool, default=False
        If True, also return the number of times each unique item appears in
        values.

    Returns
    -------
    unique : ndarray
        The sorted unique values.

    unique_inverse : ndarray
        The indices to reconstruct the original array from the unique array.
        Only provided if `return_inverse` is True.

    unique_counts : ndarray
        The number of times each of the unique values comes up in the original
        array. Only provided if `return_counts` is True.
    """
    # Object arrays hold arbitrary Python objects that numpy cannot compare
    # reliably, so dispatch those to the pure-Python implementation.
    uniquifier = _unique_python if values.dtype == object else _unique_np
    return uniquifier(
        values, return_inverse=return_inverse, return_counts=return_counts
    )
|
Helper function to find unique values with support for python objects.
Uses pure python method for object dtype, and numpy method for
all other dtypes.
Parameters
----------
values : ndarray
Values to check for unknowns.
return_inverse : bool, default=False
If True, also return the indices of the unique values.
return_counts : bool, default=False
If True, also return the number of times each unique item appears in
values.
Returns
-------
unique : ndarray
The sorted unique values.
unique_inverse : ndarray
The indices to reconstruct the original array from the unique array.
Only provided if `return_inverse` is True.
unique_counts : ndarray
The number of times each of the unique values comes up in the original
array. Only provided if `return_counts` is True.
|
python
|
sklearn/utils/_encode.py
| 14
|
[
"values",
"return_inverse",
"return_counts"
] | false
| 2
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
topK
|
/**
 * Returns the top {@code k} elements offered so far (all of them if fewer
 * than {@code k} were offered), in comparator order, as an unmodifiable copy
 * detached from this selector. Runs in O(k log k).
 */
public List<T> topK() {
    @SuppressWarnings("nullness") // safe because we pass sort() a range that contains real Ts
    T[] castBuffer = (T[]) buffer;
    sort(castBuffer, 0, bufferSize, comparator);
    if (bufferSize > k) {
        // Trim the buffer back down to k elements and record the new threshold.
        Arrays.fill(buffer, k, buffer.length, null);
        bufferSize = k;
        threshold = buffer[k - 1];
    }
    // Up to bufferSize, all elements of buffer are real Ts (not null unless T includes null)
    T[] topK = Arrays.copyOf(castBuffer, bufferSize);
    // we have to support null elements, so no ImmutableList for us
    return unmodifiableList(asList(topK));
}
|
Returns the top {@code k} elements offered to this {@code TopKSelector}, or all elements if
fewer than {@code k} have been offered, in the order specified by the factory used to create
this {@code TopKSelector}.
<p>The returned list is an unmodifiable copy and will not be affected by further changes to
this {@code TopKSelector}. This method returns in O(k log k) time.
|
java
|
android/guava/src/com/google/common/collect/TopKSelector.java
| 282
|
[] | true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
|
getGroup
|
/**
 * Return the group for the given source, lazily creating the root group when
 * the source is {@code null}.
 * @param source the metadata source (may be {@code null} for root properties)
 * @return the matching group, or {@code null} if the source's group is unknown
 */
private ConfigurationMetadataGroup getGroup(ConfigurationMetadataSource source) {
    if (source == null) {
        return this.allGroups.computeIfAbsent(ROOT_GROUP, (key) -> new ConfigurationMetadataGroup(ROOT_GROUP));
    }
    return this.allGroups.get(source.getGroupId());
}
|
Return the group for the specified source, creating the root group on demand.
@param source the metadata source (may be null for root properties)
|
java
|
configuration-metadata/spring-boot-configuration-metadata/src/main/java/org/springframework/boot/configurationmetadata/SimpleConfigurationMetadataRepository.java
| 99
|
[
"source"
] |
ConfigurationMetadataGroup
| true
| 2
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
createApplicationContext
|
/**
 * Strategy method used to create the {@link ApplicationContext}: delegates
 * to the configured factory for this application's web application type.
 * @return the application context (not yet refreshed)
 */
protected ConfigurableApplicationContext createApplicationContext() {
    ConfigurableApplicationContext context = this.applicationContextFactory
            .create(this.properties.getWebApplicationType());
    // Guard against a misbehaving factory returning null.
    Assert.state(context != null, "ApplicationContextFactory created null context");
    return context;
}
|
Strategy method used to create the {@link ApplicationContext}. By default this
method will respect any explicitly set application context class or factory before
falling back to a suitable default.
@return the application context (not yet refreshed)
@see #setApplicationContextFactory(ApplicationContextFactory)
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 579
|
[] |
ConfigurableApplicationContext
| true
| 1
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
coo_to_sparse_series
|
def coo_to_sparse_series(
    A: scipy.sparse.coo_matrix, dense_index: bool = False
) -> Series:
    """
    Convert a scipy.sparse.coo_matrix to a Series with type sparse.

    Parameters
    ----------
    A : scipy.sparse.coo_matrix
    dense_index : bool, default False
        If True, reindex onto the product of A's row and column labels.

    Returns
    -------
    Series

    Raises
    ------
    TypeError if A is not a coo_matrix
    """
    from pandas import SparseDtype

    try:
        # coo_matrix exposes parallel data/row/col arrays; anything else
        # lacks these attributes and is rejected below.
        index = MultiIndex.from_arrays((A.row, A.col))
        values = A.data
    except AttributeError as err:
        raise TypeError(
            f"Expected coo_matrix. Got {type(A).__name__} instead."
        ) from err
    result = Series(values, index, copy=False).sort_index()
    result = result.astype(SparseDtype(result.dtype))
    if dense_index:
        result = result.reindex(MultiIndex.from_product([A.row, A.col]))
    return result
|
Convert a scipy.sparse.coo_matrix to a Series with type sparse.
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
Returns
-------
Series
Raises
------
TypeError if A is not a coo_matrix
|
python
|
pandas/core/arrays/sparse/scipy_sparse.py
| 176
|
[
"A",
"dense_index"
] |
Series
| true
| 2
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
_push_lazy
|
def _push_lazy(self, node: int, start: int, end: int) -> None:
    """
    Push lazy updates down to children.

    Args:
        node: Current node index
        start: Start index of the segment
        end: End index of the segment
    """
    pending = self.lazy[node]
    if pending is None:
        # Nothing queued at this node.
        return
    # Fold the pending update into this node's stored value.
    self.tree[node] = self.update_op(self.tree[node], pending)
    if start != end:  # Not a leaf node
        # Queue the update on each child, merging with whatever is
        # already pending there (identity when nothing is).
        for child in self._children(node):
            self.lazy[child] = self.update_op(
                _value_or(self.lazy[child], self.identity), pending
            )
    # The lazy value has now been fully applied at this node.
    self.lazy[node] = None
|
Push lazy updates down to children.
Args:
node: Current node index
start: Start index of the segment
end: End index of the segment
|
python
|
torch/_inductor/codegen/segmented_tree.py
| 93
|
[
"self",
"node",
"start",
"end"
] |
None
| true
| 4
| 6.88
|
pytorch/pytorch
| 96,034
|
google
| false
|
compare
|
/**
 * Compares the two specified {@code byte} values; behaves identically to
 * {@link Byte#compare}.
 * @param a the first {@code byte} to compare
 * @param b the second {@code byte} to compare
 * @return a negative value, zero, or a positive value as {@code a} is less
 * than, equal to, or greater than {@code b}
 */
public static int compare(byte a, byte b) {
    return Byte.compare(a, b);
}
|
Compares the two specified {@code byte} values. The sign of the value returned is the same as
that of {@code ((Byte) a).compareTo(b)}.
<p><b>Note:</b> this method behaves identically to {@link Byte#compare}.
@param a the first {@code byte} to compare
@param b the second {@code byte} to compare
@return a negative value if {@code a} is less than {@code b}; a positive value if {@code a} is
greater than {@code b}; or zero if they are equal
|
java
|
android/guava/src/com/google/common/primitives/SignedBytes.java
| 91
|
[
"a",
"b"
] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
checkInterceptorNames
|
private void checkInterceptorNames() {
if (!ObjectUtils.isEmpty(this.interceptorNames)) {
String finalName = this.interceptorNames[this.interceptorNames.length - 1];
if (this.targetName == null && this.targetSource == EMPTY_TARGET_SOURCE) {
// The last name in the chain may be an Advisor/Advice or a target/TargetSource.
// Unfortunately we don't know; we must look at type of the bean.
if (!finalName.endsWith(GLOBAL_SUFFIX) && !isNamedBeanAnAdvisorOrAdvice(finalName)) {
// The target isn't an interceptor.
this.targetName = finalName;
if (logger.isDebugEnabled()) {
logger.debug("Bean with name '" + finalName + "' concluding interceptor chain " +
"is not an advisor class: treating it as a target or TargetSource");
}
this.interceptorNames = Arrays.copyOf(this.interceptorNames, this.interceptorNames.length - 1);
}
}
}
}
|
Check the interceptorNames list whether it contains a target name as final element.
If found, remove the final name from the list and set it as targetName.
|
java
|
spring-aop/src/main/java/org/springframework/aop/framework/ProxyFactoryBean.java
| 362
|
[] |
void
| true
| 7
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
asfreq
|
def asfreq(self, fill_value=None):
"""
Return the values at the new freq, essentially a reindex.
Parameters
----------
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
DataFrame or Series
Values at the specified freq.
See Also
--------
Series.asfreq: Convert TimeSeries to specified frequency.
DataFrame.asfreq: Convert TimeSeries to specified frequency.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-31", "2023-02-01", "2023-02-28"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-31 2
2023-02-01 3
2023-02-28 4
dtype: int64
>>> ser.resample("MS").asfreq()
2023-01-01 1
2023-02-01 3
Freq: MS, dtype: int64
"""
return self._upsample("asfreq", fill_value=fill_value)
|
Return the values at the new freq, essentially a reindex.
Parameters
----------
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
Returns
-------
DataFrame or Series
Values at the specified freq.
See Also
--------
Series.asfreq: Convert TimeSeries to specified frequency.
DataFrame.asfreq: Convert TimeSeries to specified frequency.
Examples
--------
>>> ser = pd.Series(
... [1, 2, 3, 4],
... index=pd.DatetimeIndex(
... ["2023-01-01", "2023-01-31", "2023-02-01", "2023-02-28"]
... ),
... )
>>> ser
2023-01-01 1
2023-01-31 2
2023-02-01 3
2023-02-28 4
dtype: int64
>>> ser.resample("MS").asfreq()
2023-01-01 1
2023-02-01 3
Freq: MS, dtype: int64
|
python
|
pandas/core/resample.py
| 1,055
|
[
"self",
"fill_value"
] | false
| 1
| 6
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
register_extension_dtype
|
def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]:
"""
Register an ExtensionType with pandas as class decorator.
This enables operations like ``.astype(name)`` for the name
of the ExtensionDtype.
Returns
-------
callable
A class decorator.
See Also
--------
api.extensions.ExtensionDtype : The base class for creating custom pandas
data types.
Series : One-dimensional array with axis labels.
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
tabular data.
Examples
--------
>>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype
>>> @register_extension_dtype
... class MyExtensionDtype(ExtensionDtype):
... name = "myextension"
"""
_registry.register(cls)
return cls
|
Register an ExtensionType with pandas as class decorator.
This enables operations like ``.astype(name)`` for the name
of the ExtensionDtype.
Returns
-------
callable
A class decorator.
See Also
--------
api.extensions.ExtensionDtype : The base class for creating custom pandas
data types.
Series : One-dimensional array with axis labels.
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous
tabular data.
Examples
--------
>>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype
>>> @register_extension_dtype
... class MyExtensionDtype(ExtensionDtype):
... name = "myextension"
|
python
|
pandas/core/dtypes/base.py
| 488
|
[
"cls"
] |
type_t[ExtensionDtypeT]
| true
| 1
| 6.48
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
getMergedBeanDefinition
|
protected RootBeanDefinition getMergedBeanDefinition(String beanName, BeanDefinition bd)
throws BeanDefinitionStoreException {
return getMergedBeanDefinition(beanName, bd, null);
}
|
Return a RootBeanDefinition for the given top-level bean, by merging with
the parent if the given bean's definition is a child bean definition.
@param beanName the name of the bean definition
@param bd the original bean definition (Root/ChildBeanDefinition)
@return a (potentially merged) RootBeanDefinition for the given bean
@throws BeanDefinitionStoreException in case of an invalid bean definition
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/AbstractBeanFactory.java
| 1,380
|
[
"beanName",
"bd"
] |
RootBeanDefinition
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
sourcesToAbsolute
|
function sourcesToAbsolute(baseURL, data) {
data.sources = data.sources.map((source) => {
source = (data.sourceRoot || '') + source;
if (isAbsolute(source)) {
return pathToFileURL(source).href;
}
return new URL(source, baseURL).href;
});
// The sources array is now resolved to absolute URLs, sourceRoot should
// be updated to noop.
data.sourceRoot = '';
return data;
}
|
Read source map from file.
@param {string} mapURL - file url of the source map
@returns {object} deserialized source map JSON object
|
javascript
|
lib/internal/source_map/source_map_cache.js
| 322
|
[
"baseURL",
"data"
] | false
| 3
| 6.08
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
max
|
def max(
self,
numeric_only: bool = False,
*args,
engine: Literal["cython", "numba"] | None = None,
engine_kwargs: dict[str, bool] | None = None,
**kwargs,
):
"""
Calculate the rolling maximum.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
*args : iterable, optional
Positional arguments passed into ``func``.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or
globally setting ``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``.
The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``.
**kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.rolling : Calling rolling with Series data.
DataFrame.rolling : Calling rolling with DataFrames.
Series.max : Aggregating max for Series.
DataFrame.max : Aggregating max for DataFrame.
Notes
-----
See :ref:`window.numba_engine` and :ref:`enhancingperf.numba`
for extended documentation and performance considerations
for the Numba engine.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4])
>>> ser.rolling(2).max()
0 NaN
1 2.0
2 3.0
3 4.0
dtype: float64
"""
return super().max(
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
|
Calculate the rolling maximum.
Parameters
----------
numeric_only : bool, default False
Include only float, int, boolean columns.
*args : iterable, optional
Positional arguments passed into ``func``.
engine : str, default None
* ``'cython'`` : Runs the operation through C-extensions from cython.
* ``'numba'`` : Runs the operation through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or
globally setting ``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``.
The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}``.
**kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
Series or DataFrame
Return type is the same as the original object with ``np.float64`` dtype.
See Also
--------
Series.rolling : Calling rolling with Series data.
DataFrame.rolling : Calling rolling with DataFrames.
Series.max : Aggregating max for Series.
DataFrame.max : Aggregating max for DataFrame.
Notes
-----
See :ref:`window.numba_engine` and :ref:`enhancingperf.numba`
for extended documentation and performance considerations
for the Numba engine.
Examples
--------
>>> ser = pd.Series([1, 2, 3, 4])
>>> ser.rolling(2).max()
0 NaN
1 2.0
2 3.0
3 4.0
dtype: float64
|
python
|
pandas/core/window/rolling.py
| 2,449
|
[
"self",
"numeric_only",
"engine",
"engine_kwargs"
] | true
| 1
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
getSource
|
public Source getSource(T service) {
Source source = this.sources.get(service);
Assert.state(source != null,
() -> "Unable to find service " + ObjectUtils.identityToString(source));
return source;
}
|
Get the source of the given service.
@param service the service instance
@return the source of the service
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AotServices.java
| 179
|
[
"service"
] |
Source
| true
| 1
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
topicPartitionsAwaitingReconciliation
|
Map<Uuid, SortedSet<Integer>> topicPartitionsAwaitingReconciliation() {
if (currentTargetAssignment == LocalAssignment.NONE) {
return Collections.emptyMap();
}
if (currentAssignment == LocalAssignment.NONE) {
return currentTargetAssignment.partitions;
}
final Map<Uuid, SortedSet<Integer>> topicPartitionMap = new HashMap<>();
currentTargetAssignment.partitions.forEach((topicId, targetPartitions) -> {
final SortedSet<Integer> reconciledPartitions = currentAssignment.partitions.get(topicId);
if (!targetPartitions.equals(reconciledPartitions)) {
final TreeSet<Integer> missingPartitions = new TreeSet<>(targetPartitions);
if (reconciledPartitions != null) {
missingPartitions.removeAll(reconciledPartitions);
}
topicPartitionMap.put(topicId, missingPartitions);
}
});
return Collections.unmodifiableMap(topicPartitionMap);
}
|
@return Map of topics partitions received in a target assignment that have not been
reconciled yet because topic names are not in metadata or reconciliation hasn't finished.
The values in the map are the sets of partitions contained in the target assignment but
missing from the currently reconciled assignment, for each topic.
Visible for testing.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java
| 1,350
|
[] | true
| 5
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
_node_metadata_hook
|
def _node_metadata_hook(
node: torch.fx.Node,
metadata: Optional[dict[str, Any]] = None,
fake_mode: Optional[FakeTensorMode] = None,
) -> None:
"""
Hook for adding the appropriate metadata to nodes that are created during a
pass using graph.create_node. An example of how to use it:
```
with _set_node_metadata_hook(gm,
functools.partial(_node_metadata_hook, metadata={"stack_trace": "file"})
):
pass(gm)
```
This hook should not work for all generic cases -- specifically it assumes
that nodes being added are only call_function nodes, and copies over the
first argument node's nn_module_stack.
"""
# pyrefly: ignore [bad-assignment]
fake_mode = fake_mode or contextlib.nullcontext()
assert node.op == "call_function" and callable(node.target), (
f"node: {node}, target: {node.target}"
)
if (
isinstance(node.target, torch._ops.OpOverload)
and len(node.target._schema.returns) == 0
):
node.meta["val"] = None
else:
fake_args, fake_kwargs = pytree.tree_map_only(
torch.fx.Node, lambda arg: arg.meta["val"], (node.args, node.kwargs)
)
# pyrefly: ignore [bad-context-manager]
with fake_mode, enable_python_dispatcher():
fake_res = node.target(*fake_args, **fake_kwargs)
node.meta["val"] = fake_res
if metadata is not None:
for k, v in metadata.items():
node.meta[k] = v
# Copy over metadata from argument nodes
arg_meta = [
arg.meta
for arg in pytree.tree_flatten((node.args, node.kwargs))[0]
if isinstance(arg, torch.fx.Node)
]
if len(arg_meta) == 0:
return
arg_meta = arg_meta[0]
node.meta["nn_module_stack"] = node.meta.get(
"nn_module_stack",
arg_meta.get(
"nn_module_stack",
{
_EMPTY_NN_MODULE_STACK_KEY: (
_EMPTY_NN_MODULE_STACK_KEY,
_EMPTY_NN_MODULE_STACK_KEY,
)
},
),
)
node.meta["torch_fn"] = node.meta.get(
"torch_fn",
(
f"{node.target.__name__}_0",
# pyrefly: ignore [missing-attribute]
f"{node.target.__class__.__name__}.{node.target.__name__}",
),
)
|
Hook for adding the appropriate metadata to nodes that are created during a
pass using graph.create_node. An example of how to use it:
```
with _set_node_metadata_hook(gm,
functools.partial(_node_metadata_hook, metadata={"stack_trace": "file"})
):
pass(gm)
```
This hook should not work for all generic cases -- specifically it assumes
that nodes being added are only call_function nodes, and copies over the
first argument node's nn_module_stack.
|
python
|
torch/_export/passes/_node_metadata_hook.py
| 15
|
[
"node",
"metadata",
"fake_mode"
] |
None
| true
| 9
| 6.72
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
validateRange
|
protected int validateRange(final int startIndex, int endIndex) {
if (startIndex < 0) {
throw new StringIndexOutOfBoundsException(startIndex);
}
if (endIndex > size) {
endIndex = size;
}
if (startIndex > endIndex) {
throw new StringIndexOutOfBoundsException("end < start");
}
return endIndex;
}
|
Validates parameters defining a range of the builder.
@param startIndex the start index, inclusive, must be valid
@param endIndex the end index, exclusive, must be valid except
that if too large it is treated as end of string
@return the new string
@throws IndexOutOfBoundsException if the index is invalid
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 3,047
|
[
"startIndex",
"endIndex"
] | true
| 4
| 7.76
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
_extended_gcd
|
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
"""
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(x, y)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
|
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(x, y)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
|
python
|
pandas/core/indexes/range.py
| 885
|
[
"self",
"a",
"b"
] |
tuple[int, int, int]
| true
| 2
| 6.08
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
run
|
@SuppressWarnings("InfiniteLoopStatement")
@Override
public void run() {
while (true) {
try {
if (!cleanUp(queue.remove())) {
break;
}
} catch (InterruptedException e) {
// ignore
}
}
}
|
Loops continuously, pulling references off the queue and cleaning them up.
|
java
|
android/guava/src/com/google/common/base/internal/Finalizer.java
| 139
|
[] |
void
| true
| 4
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
ofInnerBean
|
public static RegisteredBean ofInnerBean(RegisteredBean parent, BeanDefinitionHolder innerBean) {
Assert.notNull(innerBean, "'innerBean' must not be null");
return ofInnerBean(parent, innerBean.getBeanName(), innerBean.getBeanDefinition());
}
|
Create a new {@link RegisteredBean} instance for an inner-bean.
@param parent the parent of the inner-bean
@param innerBean a {@link BeanDefinitionHolder} for the inner bean
@return a new {@link RegisteredBean} instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/RegisteredBean.java
| 108
|
[
"parent",
"innerBean"
] |
RegisteredBean
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
is_constant_match
|
def is_constant_match(self, *values: Any) -> bool:
"""
Check if this variable is a python constant matching one of the given values.
Examples:
var.is_constant_match(None) # True if var is constant None
var.is_constant_match(True, False) # True if var is constant True or False
var.is_constant_match(NotImplemented) # True if var is constant NotImplemented
"""
return False
|
Check if this variable is a python constant matching one of the given values.
Examples:
var.is_constant_match(None) # True if var is constant None
var.is_constant_match(True, False) # True if var is constant True or False
var.is_constant_match(NotImplemented) # True if var is constant NotImplemented
|
python
|
torch/_dynamo/variables/base.py
| 386
|
[
"self"
] |
bool
| true
| 1
| 6.8
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
wrapAndThrow
|
public static <R> R wrapAndThrow(final Throwable throwable) {
throw new UndeclaredThrowableException(throwUnchecked(throwable));
}
|
Throws a checked exception without adding the exception to the throws
clause of the calling method. For checked exceptions, this method throws
an UndeclaredThrowableException wrapping the checked exception. For
Errors and RuntimeExceptions, the original exception is rethrown.
<p>
The downside to using this approach is that invoking code which needs to
handle specific checked exceptions must sniff up the exception chain to
determine if the caught exception was caused by the checked exception.
</p>
@param throwable
The throwable to rethrow.
@param <R> The type of the returned value.
@return Never actually returned, this generic type matches any type
which the calling site requires. "Returning" the results of this
method will satisfy the Java compiler requirement that all code
paths return a value.
@since 3.5
@see #asRuntimeException(Throwable)
@see #hasCause(Throwable, Class)
|
java
|
src/main/java/org/apache/commons/lang3/exception/ExceptionUtils.java
| 1,075
|
[
"throwable"
] |
R
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isVariableName
|
@Contract("null -> false")
static boolean isVariableName(@Nullable String name) {
if (!StringUtils.hasLength(name)) {
return false;
}
if (!Character.isJavaIdentifierStart(name.charAt(0))) {
return false;
}
for (int i = 1; i < name.length(); i++) {
if (!Character.isJavaIdentifierPart(name.charAt(i))) {
return false;
}
}
return true;
}
|
Determine whether the given Advisor contains an AspectJ advice.
@param advisor the Advisor to check
|
java
|
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJProxyUtils.java
| 80
|
[
"name"
] | true
| 5
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.