function_name
stringlengths 1
57
| function_code
stringlengths 20
4.99k
| documentation
stringlengths 50
2k
| language
stringclasses 5
values | file_path
stringlengths 8
166
| line_number
int32 4
16.7k
| parameters
listlengths 0
20
| return_type
stringlengths 0
131
| has_type_hints
bool 2
classes | complexity
int32 1
51
| quality_score
float32 6
9.68
| repo_name
stringclasses 34
values | repo_stars
int32 2.9k
242k
| docstring_style
stringclasses 7
values | is_async
bool 2
classes |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
createWithExpectedSize
|
/**
 * Creates an {@code ObjectCountHashMap} sized so that {@code expectedSize}
 * elements should fit without the backing table growing.
 *
 * @param expectedSize the number of elements expected to be added
 * @return a new, empty map with sufficient initial capacity
 */
static <K extends @Nullable Object> ObjectCountHashMap<K> createWithExpectedSize(
    int expectedSize) {
  return new ObjectCountHashMap<K>(expectedSize);
}
|
Creates a {@code ObjectCountHashMap} instance, with a high enough "initial capacity" that it
<i>should</i> hold {@code expectedSize} elements without growth.
@param expectedSize the number of elements you expect to add to the returned set
@return a new, empty {@code ObjectCountHashMap} with enough capacity to hold {@code
expectedSize} elements without resizing
@throws IllegalArgumentException if {@code expectedSize} is negative
|
java
|
android/guava/src/com/google/common/collect/ObjectCountHashMap.java
| 59
|
[
"expectedSize"
] | true
| 1
| 6
|
google/guava
| 51,352
|
javadoc
| false
|
|
unrollVariableAssignments
|
/**
 * Looks up {@code typeVariable} in {@code typeVarAssigns} transitively: keeps
 * following assignments until the mapped value is no longer a type variable,
 * is missing, or maps to itself.
 *
 * @param typeVariable the type variable to look up
 * @param typeVarAssigns the map used for the look-up
 * @return the resolved {@link Type}, or {@code null} if some variable was not in the map
 */
private static Type unrollVariableAssignments(TypeVariable<?> typeVariable, final Map<TypeVariable<?>, Type> typeVarAssigns) {
    Type mapped = typeVarAssigns.get(typeVariable);
    // Follow the chain of variable-to-variable assignments, stopping on a
    // concrete type, a missing mapping, or a self-reference.
    while (mapped instanceof TypeVariable<?> && !mapped.equals(typeVariable)) {
        typeVariable = (TypeVariable<?>) mapped;
        mapped = typeVarAssigns.get(typeVariable);
    }
    return mapped;
}
|
Looks up {@code typeVariable} in {@code typeVarAssigns} <em>transitively</em>, i.e. keep looking until the value found is <em>not</em> a type variable.
@param typeVariable the type variable to look up.
@param typeVarAssigns the map used for the look-up.
@return Type or {@code null} if some variable was not in the map.
|
java
|
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
| 1,640
|
[
"typeVariable",
"typeVarAssigns"
] |
Type
| true
| 3
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
isEmpty
|
/**
 * Returns whether this range contains no values: true when an endpoint
 * excludes everything on its own side (the upper endpoint is already too low,
 * or the lower endpoint is already too high).
 */
boolean isEmpty() {
    // The casts are safe because of the has*Bound() checks.
    return (hasUpperBound() && tooLow(uncheckedCastNullableTToT(getUpperEndpoint())))
        || (hasLowerBound() && tooHigh(uncheckedCastNullableTToT(getLowerEndpoint())));
}
|
Returns everything between the endpoints relative to the specified comparator, with the
specified endpoint behavior.
|
java
|
android/guava/src/com/google/common/collect/GeneralRange.java
| 160
|
[] | true
| 4
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
of
|
/**
 * Lambda-friendly identity factory: validates that the supplier is non-null
 * and returns it unchanged, allowing fluent calls such as
 * {@code InstanceSupplier.of(registeredBean -> ...).andThen(...)}.
 *
 * @param <T> the type of instance supplied by this supplier
 * @param instanceSupplier the source instance supplier (must not be null)
 * @return the given instance supplier
 */
static <T> InstanceSupplier<T> of(InstanceSupplier<T> instanceSupplier) {
    Assert.notNull(instanceSupplier, "InstanceSupplier must not be null");
    return instanceSupplier;
}
|
Lambda friendly method that can be used to create an
{@link InstanceSupplier} and add post processors in a single call. For
example: {@code InstanceSupplier.of(registeredBean -> ...).andThen(...)}.
@param <T> the type of instance supplied by this supplier
@param instanceSupplier the source instance supplier
@return a new {@link InstanceSupplier}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/InstanceSupplier.java
| 143
|
[
"instanceSupplier"
] | true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
load_package_data
|
def load_package_data(include_suspended: bool = False) -> list[dict[str, Any]]:
    """
    Load all data from providers files.

    Each provider.yaml file is validated against the provider schema and then
    enriched with derived values: python module name, package dir, docs dir,
    and system tests dir.

    :param include_suspended: also include providers whose state is "suspended".
    :return: A list containing the contents of all provider.yaml files - old and new structure.
    :raises RuntimeError: if a provider.yaml file fails schema validation.
    """
    schema = provider_yaml_schema()
    result = []
    for provider_yaml_path in get_all_provider_yaml_paths():
        with open(provider_yaml_path) as yaml_file:
            provider = yaml.safe_load(yaml_file)
        try:
            jsonschema.validate(provider, schema=schema)
        except jsonschema.ValidationError as ex:
            msg = f"Unable to parse: {provider_yaml_path}. Original error {type(ex).__name__}: {ex}"
            # Chain the original error so the full validation context is preserved.
            raise RuntimeError(msg) from ex
        if provider["state"] == "suspended" and not include_suspended:
            continue
        provider_yaml_dir_str = os.path.dirname(provider_yaml_path)
        # "apache-airflow-providers-foo-bar" -> "airflow.providers.foo.bar"
        module = provider["package-name"][len("apache-") :].replace("-", ".")
        module_folder = module[len("airflow-providers-") :].replace(".", "/")
        provider["python-module"] = module
        provider["package-dir"] = f"{provider_yaml_dir_str}/src/{module.replace('.', '/')}"
        provider["docs-dir"] = os.path.dirname(provider_yaml_path.parent / "docs")
        provider["system-tests-dir"] = f"{provider_yaml_dir_str}/tests/system/{module_folder}"
        result.append(provider)
    return result
|
Load all data from providers files
:return: A list containing the contents of all provider.yaml files - old and new structure.
|
python
|
devel-common/src/sphinx_exts/provider_yaml_utils.py
| 68
|
[
"include_suspended"
] |
list[dict[str, Any]]
| true
| 4
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
createHistogram
|
/**
 * Creates a RecordableHistogram backed by a native histogram.
 *
 * @param {{
 *   lowest? : number,
 *   highest? : number,
 *   figures? : number
 * }} [options] lowest/highest bound the trackable value range (defaults 1 and
 *   Number.MAX_SAFE_INTEGER; highest must be at least 2 * lowest); figures is
 *   the number of significant figures (1-5, default 3).
 * @returns {RecordableHistogram}
 */
function createHistogram(options = kEmptyObject) {
  validateObject(options, 'options');
  const {
    lowest = 1,
    highest = NumberMAX_SAFE_INTEGER,
    figures = 3,
  } = options;
  // bigint bounds bypass the number-range validators; only the explicit
  // highest >= 2n * lowest check below applies to them.
  // NOTE(review): if highest is a bigint while lowest is a number, 2n * lowest
  // would throw a TypeError — presumably callers pass both as bigint; confirm.
  if (typeof lowest !== 'bigint')
    validateInteger(lowest, 'options.lowest', 1, NumberMAX_SAFE_INTEGER);
  if (typeof highest !== 'bigint') {
    validateInteger(highest, 'options.highest',
                    2 * lowest, NumberMAX_SAFE_INTEGER);
  } else if (highest < 2n * lowest) {
    throw new ERR_INVALID_ARG_VALUE.RangeError('options.highest', highest);
  }
  validateInteger(figures, 'options.figures', 1, 5);
  return createRecordableHistogram(new _Histogram(lowest, highest, figures));
}
|
@param {{
lowest? : number,
highest? : number,
figures? : number
}} [options]
@returns {RecordableHistogram}
|
javascript
|
lib/internal/histogram.js
| 367
|
[] | false
| 5
| 6.32
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
_update_ctx_header
|
def _update_ctx_header(self, attrs: DataFrame, axis: AxisInt) -> None:
    """
    Update the state of the ``Styler`` for header cells.

    Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.

    Parameters
    ----------
    attrs : DataFrame
        Should contain strings of '<property>: <value>;<prop2>: <val2>', and an
        integer index.
        Whitespace shouldn't matter and the final trailing ';' shouldn't
        matter.
    axis : int
        Identifies whether the ctx object being updated is the index or columns
    """
    for col in attrs.columns:
        for row, css_str in attrs[col].items():
            # Skip empty strings and missing values outright.
            if not css_str or pd.isna(css_str):
                continue
            css_list = maybe_convert_css_to_tuples(css_str)
            if axis == 0:
                self.ctx_index[(row, col)].extend(css_list)
            else:
                self.ctx_columns[(col, row)].extend(css_list)
|
Update the state of the ``Styler`` for header cells.
Collects a mapping of {index_label: [('<property>', '<value>'), ..]}.
Parameters
----------
attrs : Series
Should contain strings of '<property>: <value>;<prop2>: <val2>', and an
integer index.
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
axis : int
Identifies whether the ctx object being updated is the index or columns
|
python
|
pandas/io/formats/style.py
| 1,701
|
[
"self",
"attrs",
"axis"
] |
None
| true
| 7
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
parseSign
|
/**
 * Parses the sign of a matched group.
 *
 * @param group the matched sign group, may be null
 * @return true if the group is non-null and starts with '-'
 */
private static boolean parseSign(final String group) {
    if (group == null) {
        return false;
    }
    return group.charAt(0) == '-';
}
|
Parses the sign captured by a time-zone regex group.
@param group the matched sign group, may be null
@return true if the group is non-null and starts with '-', i.e. a negative offset
|
java
|
src/main/java/org/apache/commons/lang3/time/FastTimeZone.java
| 87
|
[
"group"
] | true
| 2
| 8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
withExistingValue
|
/**
 * Create an updated {@link Bindable} instance with an existing value. Implies
 * that Java Bean binding will be used.
 *
 * @param existingValue the existing value; must be null, an array, or an
 *     instance of the bindable's boxed type
 * @return an updated {@link Bindable} using {@code BindMethod.JAVA_BEAN}
 * @throws IllegalArgumentException if the existing value is not an instance of the type
 * @throws IllegalStateException if this bindable binds as a value object
 */
public Bindable<T> withExistingValue(@Nullable T existingValue) {
    Assert.isTrue(existingValue == null || this.type.isArray() || boxedTypeIsInstanceOf(existingValue),
            () -> "'existingValue' must be an instance of " + this.type);
    Assert.state(this.bindMethod != BindMethod.VALUE_OBJECT,
            () -> "An existing value cannot be provided when binding as a value object");
    // Only wrap in a supplier when there is actually a value to supply.
    Supplier<T> value = (existingValue != null) ? () -> existingValue : null;
    return new Bindable<>(this.type, this.boxedType, value, this.annotations, this.bindRestrictions,
            BindMethod.JAVA_BEAN);
}
|
Create an updated {@link Bindable} instance with an existing value. Implies that
Java Bean binding will be used.
@param existingValue the existing value
@return an updated {@link Bindable}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
| 194
|
[
"existingValue"
] | true
| 4
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
areRequestStatesInProgress
|
/**
 * Checks whether any acknowledge request state in the given queue is still in
 * progress.
 *
 * @param acknowledgeRequestStates the queue of request states, may be null
 * @return {@code true} if at least one request state is in progress, else {@code false}
 */
private boolean areRequestStatesInProgress(Queue<AcknowledgeRequestState> acknowledgeRequestStates) {
    if (acknowledgeRequestStates == null) {
        return false;
    }
    return acknowledgeRequestStates.stream().anyMatch(this::isRequestStateInProgress);
}
|
Checks whether any acknowledge request state in the given queue is still in progress.
@return Returns true if at least one request state is in progress, else false.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java
| 521
|
[
"acknowledgeRequestStates"
] | true
| 3
| 6.08
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
toZonedDateTime
|
/**
 * Converts this instance to a {@link ZonedDateTime}, delegating to the
 * conversion overload with this instance's calendar.
 *
 * @return a ZonedDateTime.
 * @since 3.17.0
 */
public ZonedDateTime toZonedDateTime() {
    return toZonedDateTime(calendar);
}
|
Converts this instance to a {@link ZonedDateTime}.
@return a ZonedDateTime.
@since 3.17.0
|
java
|
src/main/java/org/apache/commons/lang3/time/CalendarUtils.java
| 233
|
[] |
ZonedDateTime
| true
| 1
| 6.48
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
len
|
def len(self):
    """
    Compute the length of each element in the Series/Index.

    The element may be a sequence (such as a string, tuple or list) or a collection
    (such as a dictionary).

    Returns
    -------
    Series or Index of int
        A Series or Index of integer values indicating the length of each
        element in the Series or Index.

    See Also
    --------
    str.len : Python built-in function returning the length of an object.
    Series.size : Returns the length of the Series.

    Examples
    --------
    Returns the length (number of characters) in a string. Returns the
    number of entries for dictionaries, lists or tuples.

    >>> s = pd.Series(
    ...     ["dog", "", 5, {"foo": "bar"}, [2, 3, 5, 7], ("one", "two", "three")]
    ... )
    >>> s
    0                  dog
    1
    2                    5
    3       {'foo': 'bar'}
    4         [2, 3, 5, 7]
    5    (one, two, three)
    dtype: object
    >>> s.str.len()
    0    3.0
    1    0.0
    2    NaN
    3    1.0
    4    4.0
    5    3.0
    dtype: float64
    """
    # Delegate the per-element length computation to the backing array and
    # wrap it as a numeric (non-string) result.
    return self._wrap_result(self._data.array._str_len(), returns_string=False)
|
Compute the length of each element in the Series/Index.
The element may be a sequence (such as a string, tuple or list) or a collection
(such as a dictionary).
Returns
-------
Series or Index of int
A Series or Index of integer values indicating the length of each
element in the Series or Index.
See Also
--------
str.len : Python built-in function returning the length of an object.
Series.size : Returns the length of the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(
... ["dog", "", 5, {"foo": "bar"}, [2, 3, 5, 7], ("one", "two", "three")]
... )
>>> s
0 dog
1
2 5
3 {'foo': 'bar'}
4 [2, 3, 5, 7]
5 (one, two, three)
dtype: object
>>> s.str.len()
0 3.0
1 0.0
2 NaN
3 1.0
4 4.0
5 3.0
dtype: float64
|
python
|
pandas/core/strings/accessor.py
| 3,316
|
[
"self"
] | false
| 1
| 6.32
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
|
_slice_at_axis
|
def _slice_at_axis(sl, axis):
"""
Construct tuple of slices to slice an array in the given dimension.
Parameters
----------
sl : slice
The slice for the given dimension.
axis : int
The axis to which `sl` is applied. All other dimensions are left
"unsliced".
Returns
-------
sl : tuple of slices
A tuple with slices matching `shape` in length.
Examples
--------
>>> np._slice_at_axis(slice(None, 3, -1), 1)
(slice(None, None, None), slice(None, 3, -1), (...,))
"""
return (slice(None),) * axis + (sl,) + (...,)
|
Construct tuple of slices to slice an array in the given dimension.
Parameters
----------
sl : slice
The slice for the given dimension.
axis : int
The axis to which `sl` is applied. All other dimensions are left
"unsliced".
Returns
-------
sl : tuple of slices
A tuple with slices matching `shape` in length.
Examples
--------
>>> np._slice_at_axis(slice(None, 3, -1), 1)
(slice(None, None, None), slice(None, 3, -1), (...,))
|
python
|
numpy/lib/_arraypad_impl.py
| 34
|
[
"sl",
"axis"
] | false
| 1
| 6.48
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
writeEntry
|
/**
 * Write a single entry from the source jar to the archive, applying the entry
 * transformer first; the entry is skipped when the transformer returns
 * {@code null}.
 *
 * @param jarFile the source jar file containing the entry
 * @param entryTransformer the transformer applied to the entry before writing
 * @param unpackHandler the unpack handler used while setting up the entry
 * @param entry the entry to write
 * @param library the library associated with the entry, or {@code null}
 * @throws IOException if the entry cannot be written
 */
private void writeEntry(JarFile jarFile, EntryTransformer entryTransformer, UnpackHandler unpackHandler,
        JarArchiveEntry entry, @Nullable Library library) throws IOException {
    setUpEntry(jarFile, entry, unpackHandler);
    try (ZipHeaderPeekInputStream inputStream = new ZipHeaderPeekInputStream(jarFile.getInputStream(entry))) {
        EntryWriter entryWriter = new InputStreamEntryWriter(inputStream);
        JarArchiveEntry transformedEntry = entryTransformer.transform(entry);
        if (transformedEntry != null) {
            writeEntry(transformedEntry, library, entryWriter);
        }
    }
}
|
Write an entry from the given jar file to the archive, applying the entry transformer
first and skipping the entry if the transformer returns null.
@throws IOException if the entry cannot be written
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/AbstractJarWriter.java
| 100
|
[
"jarFile",
"entryTransformer",
"unpackHandler",
"entry",
"library"
] |
void
| true
| 2
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
throwIfNoAssignorsConfigured
|
/**
 * Ensures that at least one partition assignor has been configured.
 *
 * @throws IllegalStateException if the assignors list is empty
 */
private void throwIfNoAssignorsConfigured() {
    if (!assignors.isEmpty()) {
        return;
    }
    throw new IllegalStateException("Must configure at least one partition assigner class name to " +
        ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG + " configuration property");
}
|
Throws an IllegalStateException if no partition assignors have been configured.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ClassicKafkaConsumer.java
| 1,268
|
[] |
void
| true
| 2
| 6.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
entrySetIterator
|
/**
 * Returns an iterator over the delegate map's entries whose {@code remove()}
 * also removes the corresponding mapping from the inverse map, keeping both
 * directions of the bimap consistent.
 */
Iterator<Entry<K, V>> entrySetIterator() {
    Iterator<Entry<K, V>> iterator = delegate.entrySet().iterator();
    return new Iterator<Entry<K, V>>() {
        // Last entry returned by next(); null before the first next() and
        // after a successful remove().
        @Nullable Entry<K, V> entry;

        @Override
        public boolean hasNext() {
            return iterator.hasNext();
        }

        @Override
        public Entry<K, V> next() {
            entry = iterator.next();
            // Wrap the raw entry; BiMapEntry presumably keeps the inverse map
            // in sync on setValue() — confirm against its implementation.
            return new BiMapEntry(entry);
        }

        @Override
        public void remove() {
            if (entry == null) {
                throw new IllegalStateException("no calls to next() since the last call to remove()");
            }
            // Capture the value before removal, then clean up the inverse mapping.
            V value = entry.getValue();
            iterator.remove();
            removeFromInverseMap(value);
            entry = null;
        }
    };
}
|
Returns an iterator over the delegate map's entries whose remove() also removes the
corresponding mapping from the inverse map, keeping both directions consistent.
|
java
|
android/guava/src/com/google/common/collect/AbstractBiMap.java
| 331
|
[] | true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
generateSetBeanDefinitionPropertiesCode
|
/**
 * Generate the code that sets the properties of the given bean definition.
 * Additional {@code Delegate} implementations are loaded from AOT services
 * registered against the bean factory's class loader.
 *
 * @param generationContext the generation context
 * @param beanRegistrationCode the bean registration code
 * @param beanDefinition the bean definition to generate code for
 * @param attributeFilter a predicate selecting which attributes to include
 * @return the generated code block
 */
@Override
public CodeBlock generateSetBeanDefinitionPropertiesCode(
        GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode,
        RootBeanDefinition beanDefinition, Predicate<String> attributeFilter) {
    Loader loader = AotServices.factories(this.registeredBean.getBeanFactory().getBeanClassLoader());
    List<Delegate> additionalDelegates = loader.load(Delegate.class).asList();
    return new BeanDefinitionPropertiesCodeGenerator(
            generationContext.getRuntimeHints(), attributeFilter,
            beanRegistrationCode.getMethods(), additionalDelegates,
            (name, value) -> generateValueCode(generationContext, name, value))
        .generateCode(beanDefinition);
}
|
Generate the code that sets the properties of the given bean definition, loading
additional AOT delegates from the bean factory's class loader.
@param generationContext the generation context
@param beanRegistrationCode the bean registration code
@param beanDefinition the bean definition to generate code for
@param attributeFilter a predicate selecting which attributes to include
@return the generated code block
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/DefaultBeanRegistrationCodeFragments.java
| 165
|
[
"generationContext",
"beanRegistrationCode",
"beanDefinition",
"attributeFilter"
] |
CodeBlock
| true
| 1
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
xContentType
|
/**
 * Guesses the content type based on the provided input stream without consuming
 * it. The stream must support mark/reset; the read position is restored before
 * returning.
 *
 * @param si the input stream to sniff; must support mark/reset
 * @return the detected content type, or {@code null} if the stream is empty or
 *     contains only whitespace
 * @throws IOException if reading from the stream fails
 * @throws IllegalArgumentException if the stream does not support mark/reset
 * @deprecated the content type should not be guessed except for few cases where we effectively don't know the content type.
 * The REST layer should move to reading the Content-Type header instead.
 */
@Deprecated
public static XContentType xContentType(InputStream si) throws IOException {
    /*
     * We need to guess the content type. To do this, we look for the first non-whitespace character and then try to guess the content
     * type on the GUESS_HEADER_LENGTH bytes that follow. We do this in a way that does not modify the initial read position in the
     * underlying input stream. This is why the input stream must support mark/reset and why we repeatedly mark the read position and
     * reset.
     */
    if (si.markSupported() == false) {
        throw new IllegalArgumentException("Cannot guess the xcontent type without mark/reset support on " + si.getClass());
    }
    si.mark(Integer.MAX_VALUE);
    try {
        // scan until we find the first non-whitespace character or the end of the stream
        int current;
        do {
            current = si.read();
            if (current == -1) {
                return null;
            }
        } while (Character.isWhitespace((char) current));
        // now guess the content type off the next GUESS_HEADER_LENGTH bytes including the current byte
        final byte[] firstBytes = new byte[GUESS_HEADER_LENGTH];
        firstBytes[0] = (byte) current;
        int read = 1;
        while (read < GUESS_HEADER_LENGTH) {
            // Read in a loop: a single read() call may return fewer bytes than requested.
            final int r = si.read(firstBytes, read, GUESS_HEADER_LENGTH - read);
            if (r == -1) {
                break;
            }
            read += r;
        }
        return xContentType(firstBytes, 0, read);
    } finally {
        // Always restore the caller's read position, even on early return.
        si.reset();
    }
}
|
Guesses the content type based on the provided input stream without consuming it.
@deprecated the content type should not be guessed except for few cases where we effectively don't know the content type.
The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
This method is deprecated to prevent usages of it from spreading further without specific reasons.
|
java
|
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentFactory.java
| 220
|
[
"si"
] |
XContentType
| true
| 5
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
optBoolean
|
/**
 * Returns the value mapped by {@code name} if it exists and is a boolean or
 * can be coerced to a boolean; returns {@code false} otherwise.
 *
 * @param name the name of the property
 * @return the mapped value, or {@code false} if absent or not coercible
 */
public boolean optBoolean(String name) {
    return optBoolean(name, false);
}
|
Returns the value mapped by {@code name} if it exists and is a boolean or can be
coerced to a boolean. Returns false otherwise.
@param name the name of the property
@return the value or {@code false}
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 410
|
[
"name"
] | true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
appendTo
|
/**
 * Appends the current contents of this builder to the provided
 * {@link Appendable}, using the most direct bulk write available for the
 * concrete target type to avoid extra copies.
 *
 * @param appendable the appendable to append data to
 * @throws IOException if an I/O error occurs
 */
public void appendTo(final Appendable appendable) throws IOException {
    // Dispatch on the concrete type so the internal buffer can be handed over
    // in a single bulk call instead of char-by-char appends.
    if (appendable instanceof Writer) {
        ((Writer) appendable).write(buffer, 0, size);
    } else if (appendable instanceof StringBuilder) {
        ((StringBuilder) appendable).append(buffer, 0, size);
    } else if (appendable instanceof StringBuffer) {
        ((StringBuffer) appendable).append(buffer, 0, size);
    } else if (appendable instanceof CharBuffer) {
        ((CharBuffer) appendable).put(buffer, 0, size);
    } else {
        // Generic fallback via the Appendable contract.
        appendable.append(this);
    }
}
|
Appends current contents of this {@link StrBuilder} to the
provided {@link Appendable}.
<p>
This method tries to avoid doing any extra copies of contents.
</p>
@param appendable the appendable to append data to
@throws IOException if an I/O error occurs
@since 3.4
@see #readFrom(Readable)
|
java
|
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
| 1,379
|
[
"appendable"
] |
void
| true
| 5
| 6.72
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
toUtf8Bytes
|
/**
 * Encodes the provided char[] to a UTF-8 byte[] without ever materializing a
 * String. The input char[] is not modified; the caller is responsible for
 * clearing it if the content is sensitive. Scratch buffers used during
 * encoding are zeroed before returning where possible.
 *
 * @param chars the characters to encode
 * @return a newly allocated UTF-8 encoded byte array
 */
public static byte[] toUtf8Bytes(char[] chars) {
    final ByteBuffer encoded = StandardCharsets.UTF_8.encode(CharBuffer.wrap(chars));
    final byte[] result;
    if (encoded.hasArray()) {
        // The backing array may be larger than the encoded region, so copy
        // exactly [position, limit) and then scrub the scratch array.
        result = Arrays.copyOfRange(encoded.array(), encoded.position(), encoded.limit());
        Arrays.fill(encoded.array(), (byte) 0); // clear sensitive data
    } else {
        result = new byte[encoded.remaining()];
        encoded.get(result);
        // If the buffer is writable, overwrite its contents with zeros.
        if (encoded.isReadOnly() == false) {
            encoded.clear(); // reset position so the fill starts at 0
            for (int i = 0; i < encoded.limit(); i++) {
                encoded.put((byte) 0);
            }
        }
    }
    return result;
}
|
Encodes the provided char[] to a UTF-8 byte[]. This is done while avoiding
conversions to String. The provided char[] is not modified by this method, so
the caller needs to take care of clearing the value if it is sensitive.
|
java
|
libs/core/src/main/java/org/elasticsearch/core/CharArrays.java
| 63
|
[
"chars"
] | true
| 4
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
writeReplace
|
/**
 * Serialization hook: replaces this bean factory with a lightweight
 * {@code SerializedBeanFactoryReference} during serialization.
 *
 * @return the serialized reference to write in place of this factory
 * @throws NotSerializableException if this factory has no serialization id
 */
@Serial
protected Object writeReplace() throws ObjectStreamException {
    if (this.serializationId != null) {
        return new SerializedBeanFactoryReference(this.serializationId);
    }
    else {
        throw new NotSerializableException("DefaultListableBeanFactory has no serialization id");
    }
}
|
Serialization hook: replaces this bean factory with a lightweight serialized
reference during serialization.
@return a SerializedBeanFactoryReference holding this factory's serialization id
@throws NotSerializableException if this factory has no serialization id
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
| 2,410
|
[] |
Object
| true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
handleSpecificFailure
|
/**
 * Hook for subclasses to handle a failure specific to the group type when
 * sending the request and no response has been received. This default
 * implementation handles nothing.
 *
 * @param exception the exception thrown building the request
 * @return {@code true} if the error was handled, else {@code false}
 */
public boolean handleSpecificFailure(Throwable exception) {
    return false;
}
|
Error handling specific failure to a group type when sending the request
and no response has been received.
@param exception The exception thrown building the request
@return true if the error was handled, else false
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java
| 467
|
[
"exception"
] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
unsentOffsetFetchRequests
|
/**
 * Returns the offset fetch requests that are queued but have not yet been
 * sent.
 *
 * @return the pending unsent offset fetch request states
 */
private List<OffsetFetchRequestState> unsentOffsetFetchRequests() {
    return pendingRequests.unsentOffsetFetches;
}
|
Returns the offset fetch requests that are queued but have not yet been sent.
@return the pending unsent offset fetch request states
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java
| 586
|
[] | true
| 1
| 6.8
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
inferred_freq
|
def inferred_freq(self) -> str | None:
    """
    Tries to return a string representing a frequency generated by infer_freq.

    Returns None if it can't autodetect the frequency.

    See Also
    --------
    DatetimeIndex.freqstr : Return the frequency object as a string if it's set,
        otherwise None.

    Examples
    --------
    For DatetimeIndex:

    >>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"])
    >>> idx.inferred_freq
    '2D'

    For TimedeltaIndex:

    >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"])
    >>> tdelta_idx
    TimedeltaIndex(['0 days', '10 days', '20 days'],
                   dtype='timedelta64[us]', freq=None)
    >>> tdelta_idx.inferred_freq
    '10D'
    """
    # Frequency inference is only defined for 1-dimensional data; treat an
    # inference failure the same as "no detectable frequency".
    if self.ndim == 1:
        try:
            return frequencies.infer_freq(self)
        except ValueError:
            pass
    return None
|
Tries to return a string representing a frequency generated by infer_freq.
Returns None if it can't autodetect the frequency.
See Also
--------
DatetimeIndex.freqstr : Return the frequency object as a string if it's set,
otherwise None.
Examples
--------
For DatetimeIndex:
>>> idx = pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"])
>>> idx.inferred_freq
'2D'
For TimedeltaIndex:
>>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"])
>>> tdelta_idx
TimedeltaIndex(['0 days', '10 days', '20 days'],
dtype='timedelta64[us]', freq=None)
>>> tdelta_idx.inferred_freq
'10D'
|
python
|
pandas/core/arrays/datetimelike.py
| 906
|
[
"self"
] |
str | None
| true
| 2
| 6.8
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
capitalizeFully
|
/**
 * Converts all the whitespace separated words in a String into capitalized
 * words. Delegates to the delimiter-accepting overload with {@code null}
 * delimiters.
 *
 * @param str the String to capitalize, may be null.
 * @return capitalized String, {@code null} if null String input.
 */
public static String capitalizeFully(final String str) {
    return capitalizeFully(str, null);
}
|
Converts all the whitespace separated words in a String into capitalized words,
that is each word is made up of a titlecase character and then a series of
lowercase characters.
<p>Whitespace is defined by {@link Character#isWhitespace(char)}.
A {@code null} input String returns {@code null}.
Capitalization uses the Unicode title case, normally equivalent to
upper case.</p>
<pre>
WordUtils.capitalizeFully(null) = null
WordUtils.capitalizeFully("") = ""
WordUtils.capitalizeFully("i am FINE") = "I Am Fine"
</pre>
@param str the String to capitalize, may be null.
@return capitalized String, {@code null} if null String input.
|
java
|
src/main/java/org/apache/commons/lang3/text/WordUtils.java
| 133
|
[
"str"
] |
String
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
writeToArchive
|
/**
 * Write the given entry to the underlying jar output stream, applying the
 * configured last modified time (if any) to the entry first.
 *
 * @param entry the entry to write
 * @param entryWriter writes the entry content, or {@code null} when the entry
 *     has no content
 * @throws IOException if the entry cannot be written
 */
@Override
protected void writeToArchive(ZipEntry entry, @Nullable EntryWriter entryWriter) throws IOException {
    JarArchiveEntry jarEntry = asJarArchiveEntry(entry);
    if (this.lastModifiedTime != null) {
        // NOTE(review): removeFrom presumably strips the default time zone
        // offset so zip timestamps are stable across zones — confirm.
        jarEntry.setTime(DefaultTimeZoneOffset.INSTANCE.removeFrom(this.lastModifiedTime).toMillis());
    }
    this.jarOutputStream.putArchiveEntry(jarEntry);
    if (entryWriter != null) {
        entryWriter.write(this.jarOutputStream);
    }
    this.jarOutputStream.closeArchiveEntry();
}
|
Write the given entry to the underlying jar output stream, applying the configured
last modified time (if any) to the entry first.
@param entry the entry to write
@param entryWriter writes the entry content, or null when the entry has no content
@throws IOException if the entry cannot be written
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/JarWriter.java
| 72
|
[
"entry",
"entryWriter"
] |
void
| true
| 3
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
rename
|
def rename(self, name, *, inplace: bool = False) -> Self | None:
    """
    Alter Index or MultiIndex name.

    Able to set new names without level. Defaults to returning new index.
    Length of names must match number of levels in MultiIndex.

    Parameters
    ----------
    name : Hashable or a sequence of the previous
        Name(s) to set.
    inplace : bool, default False
        Modifies the object directly, instead of creating a new Index or
        MultiIndex.

    Returns
    -------
    Index or None
        The same type as the caller or None if ``inplace=True``.

    See Also
    --------
    Index.set_names : Able to set new names partially and by level.

    Examples
    --------
    >>> idx = pd.Index(["A", "C", "A", "B"], name="score")
    >>> idx.rename("grade")
    Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')

    >>> idx = pd.MultiIndex.from_product(
    ...     [["python", "cobra"], [2018, 2019]], names=["kind", "year"]
    ... )
    >>> idx
    MultiIndex([('python', 2018),
                ('python', 2019),
                ( 'cobra', 2018),
                ( 'cobra', 2019)],
               names=['kind', 'year'])
    >>> idx.rename(["species", "year"])
    MultiIndex([('python', 2018),
                ('python', 2019),
                ( 'cobra', 2018),
                ( 'cobra', 2019)],
               names=['species', 'year'])
    >>> idx.rename("species")
    Traceback (most recent call last):
    TypeError: Must pass list-like as `names`.
    """
    # Delegate to set_names, which handles both Index and MultiIndex and the
    # inplace semantics; wrapping in a list matches its expected `names` shape.
    return self.set_names([name], inplace=inplace)
|
Alter Index or MultiIndex name.
Able to set new names without level. Defaults to returning new index.
Length of names must match number of levels in MultiIndex.
Parameters
----------
name : Hashable or a sequence of the previous
Name(s) to set.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index or None
The same type as the caller or None if ``inplace=True``.
See Also
--------
Index.set_names : Able to set new names partially and by level.
Examples
--------
>>> idx = pd.Index(["A", "C", "A", "B"], name="score")
>>> idx.rename("grade")
Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')
>>> idx = pd.MultiIndex.from_product(
... [["python", "cobra"], [2018, 2019]], names=["kind", "year"]
... )
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['kind', 'year'])
>>> idx.rename(["species", "year"])
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
>>> idx.rename("species")
Traceback (most recent call last):
TypeError: Must pass list-like as `names`.
|
python
|
pandas/core/indexes/base.py
| 2,063
|
[
"self",
"name",
"inplace"
] |
Self | None
| true
| 1
| 6.8
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
toCharArray
|
/**
 * Converts the given CharSequence to a char[], using the most direct bulk
 * copy available for the concrete source type.
 *
 * @param source the {@link CharSequence} to be processed.
 * @return the resulting char array, never null.
 * @since 3.11
 */
public static char[] toCharArray(final CharSequence source) {
    final int len = StringUtils.length(source);
    if (len == 0) {
        return ArrayUtils.EMPTY_CHAR_ARRAY;
    }
    if (source instanceof String) {
        return ((String) source).toCharArray();
    }
    final char[] result = new char[len];
    if (source instanceof StringBuilder) {
        ((StringBuilder) source).getChars(0, len, result, 0);
    } else if (source instanceof StringBuffer) {
        ((StringBuffer) source).getChars(0, len, result, 0);
    } else {
        // Generic fallback: copy one character at a time.
        for (int i = 0; i < len; i++) {
            result[i] = source.charAt(i);
        }
    }
    return result;
}
|
Converts the given CharSequence to a char[].
@param source the {@link CharSequence} to be processed.
@return the resulting char array, never null.
@since 3.11
|
java
|
src/main/java/org/apache/commons/lang3/CharSequenceUtils.java
| 366
|
[
"source"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
addAttachment
|
/**
 * Add an attachment to the MimeMessage, taking the content from a
 * {@code jakarta.activation.DataSource}. The filename is MIME-encoded when
 * filename encoding is enabled.
 *
 * @param attachmentFilename the name of the attachment as it will appear in the mail
 * @param dataSource the {@code jakarta.activation.DataSource} to take the content from
 * @throws MessagingException in case of errors, including a failure to encode
 *     the attachment filename
 */
public void addAttachment(String attachmentFilename, DataSource dataSource) throws MessagingException {
    Assert.notNull(attachmentFilename, "Attachment filename must not be null");
    Assert.notNull(dataSource, "DataSource must not be null");
    try {
        MimeBodyPart mimeBodyPart = new MimeBodyPart();
        mimeBodyPart.setDisposition(Part.ATTACHMENT);
        // Encode non-ASCII filenames only when configured to do so.
        mimeBodyPart.setFileName(isEncodeFilenames() ?
                MimeUtility.encodeText(attachmentFilename) : attachmentFilename);
        mimeBodyPart.setDataHandler(new DataHandler(dataSource));
        getRootMimeMultipart().addBodyPart(mimeBodyPart);
    }
    catch (UnsupportedEncodingException ex) {
        // Surface encoding failures through the mail API's exception type.
        throw new MessagingException("Failed to encode attachment filename", ex);
    }
}
|
Add an attachment to the MimeMessage, taking the content from a
{@code jakarta.activation.DataSource}.
<p>Note that the InputStream returned by the DataSource implementation
needs to be a <i>fresh one on each call</i>, as JavaMail will invoke
{@code getInputStream()} multiple times.
@param attachmentFilename the name of the attachment as it will
appear in the mail (the content type will be determined by this)
@param dataSource the {@code jakarta.activation.DataSource} to take
the content from, determining the InputStream and the content type
@throws MessagingException in case of errors
@see #addAttachment(String, org.springframework.core.io.InputStreamSource)
@see #addAttachment(String, java.io.File)
|
java
|
spring-context-support/src/main/java/org/springframework/mail/javamail/MimeMessageHelper.java
| 1,109
|
[
"attachmentFilename",
"dataSource"
] |
void
| true
| 3
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_divide_by_count
|
def _divide_by_count(a, b, out=None):
"""
Compute a/b ignoring invalid results. If `a` is an array the division
is done in place. If `a` is a scalar, then its type is preserved in the
output. If out is None, then a is used instead so that the division
is in place. Note that this is only called with `a` an inexact type.
Parameters
----------
a : {ndarray, numpy scalar}
Numerator. Expected to be of inexact type but not checked.
b : {ndarray, numpy scalar}
Denominator.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
Returns
-------
ret : {ndarray, numpy scalar}
The return value is a/b. If `a` was an ndarray the division is done
in place. If `a` is a numpy scalar, the division preserves its type.
"""
with np.errstate(invalid='ignore', divide='ignore'):
if isinstance(a, np.ndarray):
if out is None:
return np.divide(a, b, out=a, casting='unsafe')
else:
return np.divide(a, b, out=out, casting='unsafe')
elif out is None:
# Precaution against reduced object arrays
try:
return a.dtype.type(a / b)
except AttributeError:
return a / b
else:
# This is questionable, but currently a numpy scalar can
# be output to a zero dimensional array.
return np.divide(a, b, out=out, casting='unsafe')
|
Compute a/b ignoring invalid results. If `a` is an array the division
is done in place. If `a` is a scalar, then its type is preserved in the
output. If out is None, then a is used instead so that the division
is in place. Note that this is only called with `a` an inexact type.
Parameters
----------
a : {ndarray, numpy scalar}
Numerator. Expected to be of inexact type but not checked.
b : {ndarray, numpy scalar}
Denominator.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
Returns
-------
ret : {ndarray, numpy scalar}
The return value is a/b. If `a` was an ndarray the division is done
in place. If `a` is a numpy scalar, the division preserves its type.
|
python
|
numpy/lib/_nanfunctions_impl.py
| 204
|
[
"a",
"b",
"out"
] | false
| 6
| 6.24
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
find_class_methods_with_specific_calls
|
def find_class_methods_with_specific_calls(
class_node: ast.ClassDef, target_calls: set[str], import_mappings: dict[str, str]
) -> set[str]:
"""
Identifies class methods that make specific calls.
This function only tracks target calls within the class scope. Method calling some function defined
will not be taken into consideration even if this function performs a target call.
Method calling other method that performs a target call will also be included.
This function performs a two-pass analysis of the AST:
1. It first identifies methods containing direct calls to the specified functions
and records method calls on `self`.
2. It then identifies methods that indirectly make such calls by invoking the
methods identified in the first pass.
:param class_node: The root node of the AST representing the class to analyze.
:param target_calls: A set of full paths to the method names to track when called.
:param import_mappings: A mapping of import names to fully qualified module names.
:return: Method names within the class that either directly or indirectly make the specified calls.
Examples:
> source_code = '''
... class Example:
... def method1(self):
... my_method().ok()
... def method2(self):
... self.method1()
... def method3(self):
... my_method().not_ok()
... def method4(self):
... self.some_other_method()
... '''
> find_methods_with_specific_calls(
ast.parse(source_code),
{"airflow.my_method.not_ok", "airflow.my_method.ok"},
{"my_method": "airflow.my_method"}
)
{'method1', 'method2', 'method3'}
"""
method_call_map: dict[str, set[str]] = {}
methods_with_calls: set[str] = set()
# First pass: Collect all calls and identify methods with specific calls we are looking for
for node in ast.walk(class_node):
if not isinstance(node, ast.FunctionDef):
continue
method_call_map[node.name] = set()
for sub_node in ast.walk(node):
if not isinstance(sub_node, ast.Call):
continue
called_function = sub_node.func
if not isinstance(called_function, ast.Attribute):
continue
if isinstance(called_function.value, ast.Call) and isinstance(
called_function.value.func, ast.Name
):
full_method_call = (
f"{import_mappings.get(called_function.value.func.id)}.{called_function.attr}"
)
if full_method_call in target_calls:
methods_with_calls.add(node.name)
elif isinstance(called_function.value, ast.Name) and called_function.value.id == "self":
method_call_map[node.name].add(called_function.attr)
# Second pass: Identify all methods that call the ones in `methods_with_calls`
def find_calling_methods(method_name):
for caller, callees in method_call_map.items():
if method_name in callees and caller not in methods_with_calls:
methods_with_calls.add(caller)
find_calling_methods(caller)
for method in list(methods_with_calls):
find_calling_methods(method)
return methods_with_calls
|
Identifies class methods that make specific calls.
This function only tracks target calls within the class scope. Method calling some function defined
will not be taken into consideration even if this function performs a target call.
Method calling other method that performs a target call will also be included.
This function performs a two-pass analysis of the AST:
1. It first identifies methods containing direct calls to the specified functions
and records method calls on `self`.
2. It then identifies methods that indirectly make such calls by invoking the
methods identified in the first pass.
:param class_node: The root node of the AST representing the class to analyze.
:param target_calls: A set of full paths to the method names to track when called.
:param import_mappings: A mapping of import names to fully qualified module names.
:return: Method names within the class that either directly or indirectly make the specified calls.
Examples:
> source_code = '''
... class Example:
... def method1(self):
... my_method().ok()
... def method2(self):
... self.method1()
... def method3(self):
... my_method().not_ok()
... def method4(self):
... self.some_other_method()
... '''
> find_methods_with_specific_calls(
ast.parse(source_code),
{"airflow.my_method.not_ok", "airflow.my_method.ok"},
{"my_method": "airflow.my_method"}
)
{'method1', 'method2', 'method3'}
|
python
|
devel-common/src/sphinx_exts/providers_extensions.py
| 38
|
[
"class_node",
"target_calls",
"import_mappings"
] |
set[str]
| true
| 15
| 9.28
|
apache/airflow
| 43,597
|
sphinx
| false
|
_get_smtp_connection
|
def _get_smtp_connection(host: str, port: int, timeout: int, with_ssl: bool) -> smtplib.SMTP:
"""
Return an SMTP connection to the specified host and port, with optional SSL encryption.
:param host: The hostname or IP address of the SMTP server.
:param port: The port number to connect to on the SMTP server.
:param timeout: The timeout in seconds for the connection.
:param with_ssl: Whether to use SSL encryption for the connection.
:return: An SMTP connection to the specified host and port.
"""
if not with_ssl:
return smtplib.SMTP(host=host, port=port, timeout=timeout)
ssl_context_string = conf.get("email", "SSL_CONTEXT")
if ssl_context_string == "default":
ssl_context = ssl.create_default_context()
elif ssl_context_string == "none":
ssl_context = None
else:
raise RuntimeError(
f"The email.ssl_context configuration variable must "
f"be set to 'default' or 'none' and is '{ssl_context_string}."
)
return smtplib.SMTP_SSL(host=host, port=port, timeout=timeout, context=ssl_context)
|
Return an SMTP connection to the specified host and port, with optional SSL encryption.
:param host: The hostname or IP address of the SMTP server.
:param port: The port number to connect to on the SMTP server.
:param timeout: The timeout in seconds for the connection.
:param with_ssl: Whether to use SSL encryption for the connection.
:return: An SMTP connection to the specified host and port.
|
python
|
airflow-core/src/airflow/utils/email.py
| 295
|
[
"host",
"port",
"timeout",
"with_ssl"
] |
smtplib.SMTP
| true
| 5
| 8.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
getAsText
|
@Override
public String getAsText() {
URI value = (URI) getValue();
return (value != null ? value.toString() : "");
}
|
Create a URI instance for the given user-specified String value.
<p>The default implementation encodes the value into an RFC-2396 compliant URI.
@param value the value to convert into a URI instance
@return the URI instance
@throws java.net.URISyntaxException if URI conversion failed
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/URIEditor.java
| 154
|
[] |
String
| true
| 2
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
peekCurrentPartitionInfo
|
StickyPartitionInfo peekCurrentPartitionInfo(Cluster cluster) {
StickyPartitionInfo partitionInfo = stickyPartitionInfo.get();
if (partitionInfo != null)
return partitionInfo;
// We're the first to create it.
partitionInfo = new StickyPartitionInfo(nextPartition(cluster));
if (stickyPartitionInfo.compareAndSet(null, partitionInfo))
return partitionInfo;
// Someone has raced us.
return stickyPartitionInfo.get();
}
|
Peek currently chosen sticky partition. This method works in conjunction with {@link #isPartitionChanged}
and {@link #updatePartitionInfo}. The workflow is the following:
1. peekCurrentPartitionInfo is called to know which partition to lock.
2. Lock partition's batch queue.
3. isPartitionChanged under lock to make sure that nobody raced us.
4. Append data to buffer.
5. updatePartitionInfo to update produced bytes and maybe switch partition.
It's important that steps 3-5 are under partition's batch queue lock.
@param cluster The cluster information (needed if there is no current partition)
@return sticky partition info object
|
java
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/BuiltInPartitioner.java
| 143
|
[
"cluster"
] |
StickyPartitionInfo
| true
| 3
| 7.76
|
apache/kafka
| 31,560
|
javadoc
| false
|
transform
|
def transform(self, X, **params):
"""Transform the data, and apply `transform` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`transform` method. Only valid if the final estimator
implements `transform`.
This also works where final estimator is `None` in which case all prior
transformations are applied.
Parameters
----------
X : iterable
Data to transform. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : ndarray of shape (n_samples, n_transformed_features)
Transformed data.
"""
check_is_fitted(self)
_raise_for_params(params, self, "transform")
# not branching here since params is only available if
# enable_metadata_routing=True
routed_params = process_routing(self, "transform", **params)
Xt = X
for _, name, transform in self._iter():
Xt = transform.transform(Xt, **routed_params[name].transform)
return Xt
|
Transform the data, and apply `transform` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`transform` method. Only valid if the final estimator
implements `transform`.
This also works where final estimator is `None` in which case all prior
transformations are applied.
Parameters
----------
X : iterable
Data to transform. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : ndarray of shape (n_samples, n_transformed_features)
Transformed data.
|
python
|
sklearn/pipeline.py
| 1,004
|
[
"self",
"X"
] | false
| 2
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
parse
|
static NestedLocation parse(String location) {
if (location == null || location.isEmpty()) {
throw new IllegalArgumentException("'location' must not be empty");
}
return locationCache.computeIfAbsent(location, (key) -> create(location));
}
|
Create a new {@link NestedLocation} from the given URI.
@param uri the nested URI
@return a new {@link NestedLocation} instance
@throws IllegalArgumentException if the URI is not valid
|
java
|
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/net/protocol/nested/NestedLocation.java
| 94
|
[
"location"
] |
NestedLocation
| true
| 3
| 7.6
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getObjectType
|
@Override
public @Nullable Class<?> getObjectType() {
if (!isPrepared()) {
// Not fully initialized yet -> return null to indicate "not known yet".
return null;
}
return getPreparedMethod().getReturnType();
}
|
Return the type of object that this FactoryBean creates,
or {@code null} if not known in advance.
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/MethodInvokingFactoryBean.java
| 137
|
[] | true
| 2
| 6.88
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
parse
|
public Map<String, Object> parse(Map<?, ?> props) {
// Check all configurations are defined
List<String> undefinedConfigKeys = undefinedDependentConfigs();
if (!undefinedConfigKeys.isEmpty()) {
String joined = undefinedConfigKeys.stream().map(String::toString).collect(Collectors.joining(","));
throw new ConfigException("Some configurations in are referred in the dependents, but not defined: " + joined);
}
// parse all known keys
Map<String, Object> values = new HashMap<>();
for (ConfigKey key : configKeys.values())
values.put(key.name, parseValue(key, props.get(key.name), props.containsKey(key.name)));
return values;
}
|
Parse and validate configs against this configuration definition. The input is a map of configs. It is expected
that the keys of the map are strings, but the values can either be strings or they may already be of the
appropriate type (int, string, etc). This will work equally well with either java.util.Properties instances or a
programmatically constructed map.
@param props The configs to parse and validate.
@return Parsed and validated configs. The key will be the config name and the value will be the value parsed into
the appropriate type (int, string, etc).
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
| 517
|
[
"props"
] | true
| 2
| 8.4
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
equals
|
@Override
public boolean equals(final Object obj2) {
if (obj2 instanceof Token) {
final Token tok2 = (Token) obj2;
if (this.value.getClass() != tok2.value.getClass()) {
return false;
}
if (this.count != tok2.count) {
return false;
}
if (this.value instanceof StringBuilder) {
return this.value.toString().equals(tok2.value.toString());
}
if (this.value instanceof Number) {
return this.value.equals(tok2.value);
}
return this.value == tok2.value;
}
return false;
}
|
Supports equality of this Token to another Token.
@param obj2 Object to consider equality of
@return boolean {@code true} if equal
|
java
|
src/main/java/org/apache/commons/lang3/time/DurationFormatUtils.java
| 127
|
[
"obj2"
] | true
| 6
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
getDWOName
|
static std::string
getDWOName(llvm::DWARFUnit &CU,
std::unordered_map<std::string, uint32_t> &NameToIndexMap,
std::optional<StringRef> &DwarfOutputPath) {
assert(CU.getDWOId() && "DWO ID not found.");
std::string DWOName = dwarf::toString(
CU.getUnitDIE().find({dwarf::DW_AT_dwo_name, dwarf::DW_AT_GNU_dwo_name}),
"");
assert(!DWOName.empty() &&
"DW_AT_dwo_name/DW_AT_GNU_dwo_name does not exist.");
if (DwarfOutputPath) {
DWOName = std::string(sys::path::filename(DWOName));
uint32_t &Index = NameToIndexMap[DWOName];
DWOName.append(std::to_string(Index));
++Index;
}
DWOName.append(".dwo");
return DWOName;
}
|
directory, and there are duplicate names. Assumes DWO ID is unique.
|
cpp
|
bolt/lib/Core/DIEBuilder.cpp
| 48
|
[] | true
| 4
| 7.04
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
h3ToStringList
|
private static String[] h3ToStringList(long[] h3s) {
return Arrays.stream(h3s).mapToObj(H3::h3ToString).toArray(String[]::new);
}
|
_ipow does integer exponentiation efficiently. Taken from StackOverflow.
@param base the integer base (can be positive or negative)
@param exp the integer exponent (should be nonnegative)
@return the exponentiated value
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/H3.java
| 598
|
[
"h3s"
] | true
| 1
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
job_completion
|
def job_completion(self, job_name: str, run_id: str, delay: int = 10, max_attempts: int = 60) -> str:
"""
Wait until Glue DataBrew job reaches terminal status.
:param job_name: The name of the job being processed during this run.
:param run_id: The unique identifier of the job run.
:param delay: Time in seconds to delay between polls
:param maxAttempts: Maximum number of attempts to poll for completion
:return: job status
"""
self.get_waiter("job_complete").wait(
Name=job_name,
RunId=run_id,
WaiterConfig={"Delay": delay, "maxAttempts": max_attempts},
)
status = self.get_job_state(job_name, run_id)
return status
|
Wait until Glue DataBrew job reaches terminal status.
:param job_name: The name of the job being processed during this run.
:param run_id: The unique identifier of the job run.
:param delay: Time in seconds to delay between polls
:param maxAttempts: Maximum number of attempts to poll for completion
:return: job status
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/glue_databrew.py
| 39
|
[
"self",
"job_name",
"run_id",
"delay",
"max_attempts"
] |
str
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
getEnvironmentVariable
|
public static String getEnvironmentVariable(final String name, final String defaultValue) {
try {
final String value = System.getenv(name);
return value == null ? defaultValue : value;
} catch (final SecurityException ex) {
// we are not allowed to look at this property
// System.err.println("Caught a SecurityException reading the environment variable '" + name + "'.");
return defaultValue;
}
}
|
Gets an environment variable, defaulting to {@code defaultValue} if the variable cannot be read.
<p>
If a {@link SecurityException} is caught, the return value is {@code defaultValue} and a message is written to {@code System.err}.
</p>
@param name the environment variable name.
@param defaultValue the default value.
@return the environment variable value or {@code defaultValue} if a security problem occurs.
@since 3.8
|
java
|
src/main/java/org/apache/commons/lang3/SystemUtils.java
| 2,128
|
[
"name",
"defaultValue"
] |
String
| true
| 3
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
__call__
|
def __call__(self, body_fn, args, kwargs, hints):
r"""
Call implementation of hints_wrapper
Args:
body_fn (Callable): A callable function that is within the scope
that is being traced.
args (Tuple of torch.Tensor/int/float/bool): A tuple of inputs to
body_fn.
kwargs (dict): Keyword argument to the body_fn.
hints (dict): A dict of context hints which could be passed to
backend compiler.
"""
if not isinstance(args, tuple):
args = tuple(args)
if not all(isinstance(t, (torch.Tensor, int, float, bool)) for t in args):
raise RuntimeError(
f"args must be a tuple of tensors, ints, floats, or bools, got {args}"
)
if not isinstance(kwargs, dict):
raise RuntimeError(f"kwargs must be a dict, got {type(kwargs)}")
if len(kwargs) > 0:
raise RuntimeError(
f"kwargs except for hints are not supported, got {kwargs}"
)
if not isinstance(hints, dict):
raise RuntimeError(f"hints must be a dict, got {type(hints)}")
for k, v in hints.items():
if not isinstance(k, str):
raise RuntimeError(f"hints key must be a str, got {k}.")
if not isinstance(v, (int, float, bool, str)):
raise RuntimeError(
"hints must be a dict containing int, float, bool or str "
f"value, got value {v} for key {k}."
)
# pyrefly: ignore [missing-attribute]
return super().__call__(body_fn, args, kwargs, hints)
|
r"""
Call implementation of hints_wrapper
Args:
body_fn (Callable): A callable function that is within the scope
that is being traced.
args (Tuple of torch.Tensor/int/float/bool): A tuple of inputs to
body_fn.
kwargs (dict): Keyword argument to the body_fn.
hints (dict): A dict of context hints which could be passed to
backend compiler.
|
python
|
torch/_higher_order_ops/hints_wrap.py
| 20
|
[
"self",
"body_fn",
"args",
"kwargs",
"hints"
] | false
| 9
| 6.24
|
pytorch/pytorch
| 96,034
|
google
| false
|
|
of
|
public static DoubleRange of(final double fromInclusive, final double toInclusive) {
return of(Double.valueOf(fromInclusive), Double.valueOf(toInclusive));
}
|
Creates a range with the specified minimum and maximum values (both inclusive).
<p>
The range uses the natural ordering of the elements to determine where values lie in the range.
</p>
<p>
The arguments may be passed in the order (min,max) or (max,min). The getMinimum and getMaximum methods will return the correct values.
</p>
@param fromInclusive the first value that defines the edge of the range, inclusive.
@param toInclusive the second value that defines the edge of the range, inclusive.
@return the range object, not null.
|
java
|
src/main/java/org/apache/commons/lang3/DoubleRange.java
| 48
|
[
"fromInclusive",
"toInclusive"
] |
DoubleRange
| true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
relative_luminance
|
def relative_luminance(rgba) -> float:
"""
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
color : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
"""
r, g, b = (
x / 12.92 if x <= 0.04045 else ((x + 0.055) / 1.055) ** 2.4
for x in rgba[:3]
)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
|
Calculate relative luminance of a color.
The calculation adheres to the W3C standards
(https://www.w3.org/WAI/GL/wiki/Relative_luminance)
Parameters
----------
color : rgb or rgba tuple
Returns
-------
float
The relative luminance as a value from 0 to 1
|
python
|
pandas/io/formats/style.py
| 3,968
|
[
"rgba"
] |
float
| true
| 2
| 6.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
cancel_running_jobs
|
def cancel_running_jobs(
self, application_id: str, waiter_config: dict | None = None, wait_for_completion: bool = True
) -> int:
"""
Cancel jobs in an intermediate state, and return the number of cancelled jobs.
If wait_for_completion is True, then the method will wait until all jobs are
cancelled before returning.
Note: if new jobs are triggered while this operation is ongoing,
it's going to time out and return an error.
"""
paginator = self.conn.get_paginator("list_job_runs")
results_per_response = 50
iterator = paginator.paginate(
applicationId=application_id,
states=list(self.JOB_INTERMEDIATE_STATES),
PaginationConfig={
"PageSize": results_per_response,
},
)
count = 0
for r in iterator:
job_ids = [jr["id"] for jr in r["jobRuns"]]
count += len(job_ids)
if job_ids:
self.log.info(
"Cancelling %s pending job(s) for the application %s so that it can be stopped",
len(job_ids),
application_id,
)
for job_id in job_ids:
self.conn.cancel_job_run(applicationId=application_id, jobRunId=job_id)
if wait_for_completion:
if count > 0:
self.log.info("now waiting for the %s cancelled job(s) to terminate", count)
self.get_waiter("no_job_running").wait(
applicationId=application_id,
states=list(self.JOB_INTERMEDIATE_STATES.union({"CANCELLING"})),
WaiterConfig=waiter_config or {},
)
return count
|
Cancel jobs in an intermediate state, and return the number of cancelled jobs.
If wait_for_completion is True, then the method will wait until all jobs are
cancelled before returning.
Note: if new jobs are triggered while this operation is ongoing,
it's going to time out and return an error.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/emr.py
| 270
|
[
"self",
"application_id",
"waiter_config",
"wait_for_completion"
] |
int
| true
| 7
| 7.04
|
apache/airflow
| 43,597
|
unknown
| false
|
orElse
|
@Contract("!null -> !null")
public @Nullable T orElse(@Nullable T other) {
return (this.value != null) ? this.value : other;
}
|
Return the object that was bound, or {@code other} if no value has been bound.
@param other the value to be returned if there is no bound value (may be
{@code null})
@return the value, if bound, otherwise {@code other}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/BindResult.java
| 103
|
[
"other"
] |
T
| true
| 2
| 8.16
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
parseObjectTypeMembers
|
function parseObjectTypeMembers(): NodeArray<TypeElement> {
let members: NodeArray<TypeElement>;
if (parseExpected(SyntaxKind.OpenBraceToken)) {
members = parseList(ParsingContext.TypeMembers, parseTypeMember);
parseExpected(SyntaxKind.CloseBraceToken);
}
else {
members = createMissingList<TypeElement>();
}
return members;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 4,378
|
[] | true
| 3
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
CancellationCallback
|
CancellationCallback(CancellationCallback&&) = delete;
|
Deregisters the callback from the CancellationToken.
If cancellation has been requested concurrently on another thread and the
callback is currently executing then the destructor will block until after
the callback has returned (otherwise it might be left with a dangling
reference).
You should generally try to implement your callback functions to be lock
free to avoid deadlocks between the callback executing and the
CancellationCallback destructor trying to deregister the callback.
If the callback has not started executing yet then the callback will be
deregistered from the CancellationToken before the destructor completes.
Once the destructor returns you can be guaranteed that the callback will
not be called by a subsequent call to 'requestCancellation()' on a
CancellationSource associated with the CancellationToken passed to the
constructor.
|
cpp
|
folly/CancellationToken.h
| 318
|
[] | true
| 2
| 6.32
|
facebook/folly
| 30,157
|
doxygen
| false
|
|
toString
|
@Override
public String toString() {
return getName() + ":" + getCode();
}
|
Convert the existing code to a hangup.
@return a new ExitStatus with hangup=true
|
java
|
cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/status/ExitStatus.java
| 99
|
[] |
String
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
listPartitionReassignments
|
default ListPartitionReassignmentsResult listPartitionReassignments(ListPartitionReassignmentsOptions options) {
return listPartitionReassignments(Optional.empty(), options);
}
|
List all of the current partition reassignments
<p>The following exceptions can be anticipated when calling {@code get()} on the futures obtained from
the returned {@code ListPartitionReassignmentsResult}:</p>
<ul>
<li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
If the authenticated user doesn't have alter access to the cluster.</li>
<li>{@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException}
If a given topic or partition does not exist.</li>
<li>{@link org.apache.kafka.common.errors.TimeoutException}
If the request timed out before the controller could list the current reassignments.</li>
</ul>
@param options The options to use.
@return The result.
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 1,246
|
[
"options"
] |
ListPartitionReassignmentsResult
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
toString
|
@Override
public String toString() {
return asMap().toString();
}
|
Returns a string representation of the multimap, generated by calling {@code toString} on the
map returned by {@link Multimap#asMap}.
@return a string representation of the multimap
|
java
|
android/guava/src/com/google/common/collect/AbstractMultimap.java
| 241
|
[] |
String
| true
| 1
| 6.16
|
google/guava
| 51,352
|
javadoc
| false
|
BASIC_ESCAPE
|
public static String[][] BASIC_ESCAPE() {
return BASIC_ESCAPE.clone();
}
|
Mapping to escape the basic XML and HTML character entities.
Namely: {@code " & < >}
@return the mapping table.
|
java
|
src/main/java/org/apache/commons/lang3/text/translate/EntityArrays.java
| 382
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
parseAssignmentExpressionOrHigher
|
function parseAssignmentExpressionOrHigher(allowReturnTypeInArrowFunction: boolean): Expression {
// AssignmentExpression[in,yield]:
// 1) ConditionalExpression[?in,?yield]
// 2) LeftHandSideExpression = AssignmentExpression[?in,?yield]
// 3) LeftHandSideExpression AssignmentOperator AssignmentExpression[?in,?yield]
// 4) ArrowFunctionExpression[?in,?yield]
// 5) AsyncArrowFunctionExpression[in,yield,await]
// 6) [+Yield] YieldExpression[?In]
//
// Note: for ease of implementation we treat productions '2' and '3' as the same thing.
// (i.e. they're both BinaryExpressions with an assignment operator in it).
// First, do the simple check if we have a YieldExpression (production '6').
if (isYieldExpression()) {
return parseYieldExpression();
}
// Then, check if we have an arrow function (production '4' and '5') that starts with a parenthesized
// parameter list or is an async arrow function.
// AsyncArrowFunctionExpression:
// 1) async[no LineTerminator here]AsyncArrowBindingIdentifier[?Yield][no LineTerminator here]=>AsyncConciseBody[?In]
// 2) CoverCallExpressionAndAsyncArrowHead[?Yield, ?Await][no LineTerminator here]=>AsyncConciseBody[?In]
// Production (1) of AsyncArrowFunctionExpression is parsed in "tryParseAsyncSimpleArrowFunctionExpression".
// And production (2) is parsed in "tryParseParenthesizedArrowFunctionExpression".
//
// If we do successfully parse arrow-function, we must *not* recurse for productions 1, 2 or 3. An ArrowFunction is
// not a LeftHandSideExpression, nor does it start a ConditionalExpression. So we are done
// with AssignmentExpression if we see one.
const arrowExpression = tryParseParenthesizedArrowFunctionExpression(allowReturnTypeInArrowFunction) || tryParseAsyncSimpleArrowFunctionExpression(allowReturnTypeInArrowFunction);
if (arrowExpression) {
return arrowExpression;
}
// Now try to see if we're in production '1', '2' or '3'. A conditional expression can
// start with a LogicalOrExpression, while the assignment productions can only start with
// LeftHandSideExpressions.
//
// So, first, we try to just parse out a BinaryExpression. If we get something that is a
// LeftHandSide or higher, then we can try to parse out the assignment expression part.
// Otherwise, we try to parse out the conditional expression bit. We want to allow any
// binary expression here, so we pass in the 'lowest' precedence here so that it matches
// and consumes anything.
const pos = getNodePos();
const hasJSDoc = hasPrecedingJSDocComment();
const expr = parseBinaryExpressionOrHigher(OperatorPrecedence.Lowest);
// To avoid a look-ahead, we did not handle the case of an arrow function with a single un-parenthesized
// parameter ('x => ...') above. We handle it here by checking if the parsed expression was a single
// identifier and the current token is an arrow.
if (expr.kind === SyntaxKind.Identifier && token() === SyntaxKind.EqualsGreaterThanToken) {
return parseSimpleArrowFunctionExpression(pos, expr as Identifier, allowReturnTypeInArrowFunction, hasJSDoc, /*asyncModifier*/ undefined);
}
// Now see if we might be in cases '2' or '3'.
// If the expression was a LHS expression, and we have an assignment operator, then
// we're in '2' or '3'. Consume the assignment and return.
//
// Note: we call reScanGreaterToken so that we get an appropriately merged token
// for cases like `> > =` becoming `>>=`
if (isLeftHandSideExpression(expr) && isAssignmentOperator(reScanGreaterToken())) {
return makeBinaryExpression(expr, parseTokenNode(), parseAssignmentExpressionOrHigher(allowReturnTypeInArrowFunction), pos);
}
// It wasn't an assignment or a lambda. This is a conditional expression:
return parseConditionalExpressionRest(expr, pos, allowReturnTypeInArrowFunction);
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 5,069
|
[
"allowReturnTypeInArrowFunction"
] | true
| 8
| 6.8
|
microsoft/TypeScript
| 107,154
|
jsdoc
| true
|
|
isDigits
|
/**
 * Tests whether {@code s} consists solely of decimal digits, optionally preceded by a
 * single leading '+' or '-'. In other words: could this string plausibly parse as a Long?
 */
private static boolean isDigits(String s) {
    if (hasLength(s) == false) {
        return false; // the absence of digits is not digits
    }
    // A sign is only acceptable when at least one character follows it.
    int start = (s.length() > 1 && (s.charAt(0) == '+' || s.charAt(0) == '-')) ? 1 : 0;
    for (int idx = start; idx < s.length(); idx++) {
        if (Character.isDigit(s.charAt(idx)) == false) {
            return false;
        }
    }
    return true;
}
|
A utility method for determining whether a string contains only digits, possibly with a leading '+' or '-'.
That is, does this string have any hope of being parse-able as a Long?
|
java
|
modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/CefParser.java
| 504
|
[
"s"
] | true
| 7
| 6
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
is_re
|
def is_re(obj: object) -> TypeGuard[Pattern]:
"""
Check if the object is a regex pattern instance.
Parameters
----------
obj : object
The object to check for being a regex pattern. Typically,
this would be an object that you expect to be a compiled
pattern from the `re` module.
Returns
-------
bool
Whether `obj` is a regex pattern.
See Also
--------
api.types.is_float : Return True if given object is float.
api.types.is_iterator : Check if the object is an iterator.
api.types.is_integer : Return True if given object is integer.
api.types.is_re_compilable : Check if the object can be compiled
into a regex pattern instance.
Examples
--------
>>> from pandas.api.types import is_re
>>> import re
>>> is_re(re.compile(".*"))
True
>>> is_re("foo")
False
"""
return isinstance(obj, Pattern)
|
Check if the object is a regex pattern instance.
Parameters
----------
obj : object
The object to check for being a regex pattern. Typically,
this would be an object that you expect to be a compiled
pattern from the `re` module.
Returns
-------
bool
Whether `obj` is a regex pattern.
See Also
--------
api.types.is_float : Return True if given object is float.
api.types.is_iterator : Check if the object is an iterator.
api.types.is_integer : Return True if given object is integer.
api.types.is_re_compilable : Check if the object can be compiled
into a regex pattern instance.
Examples
--------
>>> from pandas.api.types import is_re
>>> import re
>>> is_re(re.compile(".*"))
True
>>> is_re("foo")
False
|
python
|
pandas/core/dtypes/inference.py
| 155
|
[
"obj"
] |
TypeGuard[Pattern]
| true
| 1
| 7.12
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getGenericSuperclass
|
/**
 * Returns this token's generic superclass, or {@code null} when the underlying raw type
 * has none (i.e. it is {@code Object} or an interface). Type variables and wildcards are
 * handled via their first upper bound.
 */
final @Nullable TypeToken<? super T> getGenericSuperclass() {
    // A type variable's first bound is always the superclass, when one exists.
    if (runtimeType instanceof TypeVariable) {
        return boundAsSuperclass(((TypeVariable<?>) runtimeType).getBounds()[0]);
    }
    // A wildcard carries one and only one upper bound.
    if (runtimeType instanceof WildcardType) {
        return boundAsSuperclass(((WildcardType) runtimeType).getUpperBounds()[0]);
    }
    Type rawSuperclass = getRawType().getGenericSuperclass();
    if (rawSuperclass == null) {
        return null; // Object or an interface: no superclass to report.
    }
    @SuppressWarnings("unchecked") // the resolved supertype is a super class of T
    TypeToken<? super T> resolved = (TypeToken<? super T>) resolveSupertype(rawSuperclass);
    return resolved;
}
|
Returns the generic superclass of this type or {@code null} if the type represents {@link
Object} or an interface. This method is similar but different from {@link
Class#getGenericSuperclass}. For example, {@code new TypeToken<StringArrayList>()
{}.getGenericSuperclass()} will return {@code new TypeToken<ArrayList<String>>() {}}; while
{@code StringArrayList.class.getGenericSuperclass()} will return {@code ArrayList<E>}, where
{@code E} is the type variable declared by class {@code ArrayList}.
<p>If this type is a type variable or wildcard, its first upper bound is examined and returned
if the bound is a class or extends from a class. This means that the returned type could be a
type variable too.
|
java
|
android/guava/src/com/google/common/reflect/TypeToken.java
| 315
|
[] | true
| 4
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
afterSingletonCreation
|
/**
 * Callback invoked after a singleton instance has been created: clears the bean's
 * in-creation marker unless the bean is excluded from creation checks.
 * @param beanName the name of the singleton that has just been created
 * @throws IllegalStateException if the singleton was not marked as in creation
 */
protected void afterSingletonCreation(String beanName) {
    if (this.inCreationCheckExclusions.contains(beanName)) {
        return; // excluded beans are never tracked in the in-creation set
    }
    if (!this.singletonsCurrentlyInCreation.remove(beanName)) {
        throw new IllegalStateException("Singleton '" + beanName + "' isn't currently in creation");
    }
}
|
Callback after singleton creation.
<p>The default implementation marks the singleton as not in creation anymore.
@param beanName the name of the singleton that has been created
@see #isSingletonCurrentlyInCreation
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultSingletonBeanRegistry.java
| 551
|
[
"beanName"
] |
void
| true
| 3
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
get_loc
|
def get_loc(self, key):
    """
    Get location for a label or a tuple of labels. The location is returned \
    as an integer/slice or boolean mask.
    This method returns the integer location, slice object, or boolean mask
    corresponding to the specified key, which can be a single label or a tuple
    of labels. The key represents a position in the MultiIndex, and the location
    indicates where the key is found within the index.
    Parameters
    ----------
    key : label or tuple of labels (one for each level)
        A label or tuple of labels that correspond to the levels of the MultiIndex.
        The key must match the structure of the MultiIndex.
    Returns
    -------
    int, slice object or boolean mask
        If the key is past the lexsort depth, the return may be a
        boolean mask array, otherwise it is always a slice or int.
    See Also
    --------
    Index.get_loc : The get_loc method for (single-level) index.
    MultiIndex.slice_locs : Get slice location given start label(s) and
        end label(s).
    MultiIndex.get_locs : Get location for a label/slice/list/mask or a
        sequence of such.
    Notes
    -----
    The key cannot be a slice, list of same-level labels, a boolean mask,
    or a sequence of such. If you want to use those, use
    :meth:`MultiIndex.get_locs` instead.
    Examples
    --------
    >>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")])
    >>> mi.get_loc("b")
    slice(1, 3, None)
    >>> mi.get_loc(("b", "e"))
    1
    """
    # NOTE(review): presumably rejects key kinds that are never valid here
    # (slices, list-likes, boolean masks) — confirm against _check_indexing_error.
    self._check_indexing_error(key)
    def _maybe_to_slice(loc):
        """convert integer indexer to boolean mask or slice if possible"""
        # Only intp position arrays can be compacted; anything else passes through.
        if not isinstance(loc, np.ndarray) or loc.dtype != np.intp:
            return loc
        loc = lib.maybe_indices_to_slice(loc, len(self))
        if isinstance(loc, slice):
            return loc
        # Non-contiguous positions: fall back to a full-length boolean mask.
        mask = np.empty(len(self), dtype="bool")
        mask.fill(False)
        mask[loc] = True
        return mask
    # A non-tuple key addresses the first level only.
    if not isinstance(key, tuple):
        loc = self._get_level_indexer(key, level=0)
        return _maybe_to_slice(loc)
    keylen = len(key)
    if self.nlevels < keylen:
        raise KeyError(
            f"Key length ({keylen}) exceeds index depth ({self.nlevels})"
        )
    # Fully-specified key on a unique index: a single engine lookup suffices.
    if keylen == self.nlevels and self.is_unique:
        # TODO: what if we have an IntervalIndex level?
        # i.e. do we need _index_as_unique on that level?
        try:
            return self._engine.get_loc(key)
        except KeyError as err:
            raise KeyError(key) from err
        except TypeError:
            # e.g. test_partial_slicing_with_multiindex partial string slicing
            loc, _ = self.get_loc_level(key, range(self.nlevels))
            return loc
    # -- partial selection or non-unique index
    # break the key into 2 parts based on the lexsort_depth of the index;
    # the first part returns a continuous slice of the index; the 2nd part
    # needs linear search within the slice
    i = self._lexsort_depth
    lead_key, follow_key = key[:i], key[i:]
    if not lead_key:
        # No sorted prefix to narrow by: scan the entire index.
        start = 0
        stop = len(self)
    else:
        try:
            start, stop = self.slice_locs(lead_key, lead_key)
        except TypeError as err:
            # e.g. test_groupby_example key = ((0, 0, 1, 2), "new_col")
            # when self has 5 integer levels
            raise KeyError(key) from err
    if start == stop:
        raise KeyError(key)
    if not follow_key:
        return slice(start, stop)
    if get_option("performance_warnings"):
        warnings.warn(
            "indexing past lexsort depth may impact performance.",
            PerformanceWarning,
            stacklevel=find_stack_level(),
        )
    loc = np.arange(start, stop, dtype=np.intp)
    # Narrow the candidate positions level by level within the slice.
    for i, k in enumerate(follow_key, len(lead_key)):
        mask = self.codes[i][loc] == self._get_loc_single_level_index(
            self.levels[i], k
        )
        if not mask.all():
            loc = loc[mask]
        if not len(loc):
            raise KeyError(key)
    # If every position in the slice matched, return the cheaper slice form.
    return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop)
|
Get location for a label or a tuple of labels. The location is returned \
as an integer/slice or boolean mask.
This method returns the integer location, slice object, or boolean mask
corresponding to the specified key, which can be a single label or a tuple
of labels. The key represents a position in the MultiIndex, and the location
indicates where the key is found within the index.
Parameters
----------
key : label or tuple of labels (one for each level)
A label or tuple of labels that correspond to the levels of the MultiIndex.
The key must match the structure of the MultiIndex.
Returns
-------
int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
Notes
-----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list("abb"), list("def")])
>>> mi.get_loc("b")
slice(1, 3, None)
>>> mi.get_loc(("b", "e"))
1
|
python
|
pandas/core/indexes/multi.py
| 3,267
|
[
"self",
"key"
] | false
| 17
| 6.4
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
longValue
|
/**
 * Convert the given {@link Duration} to a long value in the resolution of this unit.
 * <p>Conversion may truncate when the duration is finer-grained than this unit.
 * @param value the {@code Duration} to convert
 * @return the duration expressed as a long in this unit
 */
public long longValue(Duration value) {
    // Delegates to the per-unit conversion function configured for this constant.
    return this.longValue.apply(value);
}
|
Convert the given {@link Duration} to a long value in the resolution
of this unit.
<p>Note that this can be lossy if the current unit is bigger than the
actual resolution of the duration. For example,
{@code Duration.ofMillis(5).plusNanos(1234)} would get truncated to
{@code 5} for unit {@code MILLIS}.
@param value the {@code Duration} to convert to a long
@return the long value for the {@code Duration} in this {@code Unit}
|
java
|
spring-context/src/main/java/org/springframework/format/annotation/DurationFormat.java
| 199
|
[
"value"
] | true
| 1
| 6.8
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
firstNonNull
|
/**
 * Returns the first value in {@code values} which is not {@code null}.
 * If all values are {@code null}, or the array is {@code null} or empty,
 * then {@code null} is returned.
 * @param <T> the component type of the array.
 * @param values the values to test, may be {@code null} or empty.
 * @return the first non-null value, or {@code null} if there are none.
 * @since 3.0
 */
@SafeVarargs
public static <T> T firstNonNull(final T... values) {
    if (values != null) {
        for (final T candidate : values) {
            if (candidate != null) {
                return candidate;
            }
        }
    }
    return null;
}
|
Returns the first value in the array which is not {@code null}.
If all the values are {@code null} or the array is {@code null}
or empty then {@code null} is returned.
<pre>
ObjectUtils.firstNonNull(null, null) = null
ObjectUtils.firstNonNull(null, "") = ""
ObjectUtils.firstNonNull(null, null, "") = ""
ObjectUtils.firstNonNull(null, "zz") = "zz"
ObjectUtils.firstNonNull("abc", *) = "abc"
ObjectUtils.firstNonNull(null, "xyz", *) = "xyz"
ObjectUtils.firstNonNull(Boolean.TRUE, *) = Boolean.TRUE
ObjectUtils.firstNonNull() = null
</pre>
@param <T> the component type of the array.
@param values the values to test, may be {@code null} or empty.
@return the first value from {@code values} which is not {@code null},
or {@code null} if there are no non-null values.
@since 3.0
|
java
|
src/main/java/org/apache/commons/lang3/ObjectUtils.java
| 591
|
[] |
T
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
factorize
|
def factorize(
    self,
    use_na_sentinel: bool = True,
) -> tuple[np.ndarray, ExtensionArray]:
    """
    Encode the extension array as an enumerated type.

    Parameters
    ----------
    use_na_sentinel : bool, default True
        When True, missing values are encoded with the sentinel -1 and are
        excluded from the returned uniques; when False they are encoded as
        ordinary non-negative codes and kept among the uniques.

    Returns
    -------
    codes : ndarray
        Integer codes indexing into ``uniques`` for each original element.
    uniques : ExtensionArray
        The unique values of ``self`` (excluding NA when
        ``use_na_sentinel`` is True).

    See Also
    --------
    factorize : Top-level factorize method that dispatches here.

    Notes
    -----
    :meth:`pandas.factorize` offers a `sort` keyword as well.
    """
    # Subclasses can customize factorization in two ways:
    # 1. _values_for_factorize / _from_factorized control the values handed
    #    to pandas' internal routines and how to convert back afterwards.
    # 2. Overriding factorize itself gives complete control.
    values, na_value = self._values_for_factorize()
    codes, raw_uniques = factorize_array(
        values, use_na_sentinel=use_na_sentinel, na_value=na_value
    )
    return codes, self._from_factorized(raw_uniques, self)
|
Encode the extension array as an enumerated type.
Parameters
----------
use_na_sentinel : bool, default True
If True, the sentinel -1 will be used for NaN values. If False,
NaN values will be encoded as non-negative integers and will not drop the
NaN from the uniques of the values.
Returns
-------
codes : ndarray
An integer NumPy array that's an indexer into the original
ExtensionArray.
uniques : ExtensionArray
An ExtensionArray containing the unique values of `self`.
.. note::
uniques will *not* contain an entry for the NA value of
the ExtensionArray if there are any missing values present
in `self`.
See Also
--------
factorize : Top-level factorize method that dispatches here.
Notes
-----
:meth:`pandas.factorize` offers a `sort` keyword as well.
Examples
--------
>>> idx1 = pd.PeriodIndex(
... ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"],
... freq="M",
... )
>>> arr, idx = idx1.factorize()
>>> arr
array([0, 0, 1, 1, 2, 2])
>>> idx
PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]')
|
python
|
pandas/core/arrays/base.py
| 1,638
|
[
"self",
"use_na_sentinel"
] |
tuple[np.ndarray, ExtensionArray]
| true
| 1
| 7.2
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
neighbor
|
/**
 * Move this ijk coordinate to the hex neighbor in the given digit direction and
 * normalize the result. The center digit and out-of-range digits are a no-op.
 * @param digit the digit direction to step in
 */
public void neighbor(int digit) {
    if (digit <= Direction.CENTER_DIGIT.digit() || digit >= Direction.NUM_DIGITS.digit()) {
        return; // center or invalid direction: coordinates are unchanged
    }
    final int[] unitVec = UNIT_VECS[digit];
    ijkAdd(unitVec[0], unitVec[1], unitVec[2]);
    ijkNormalize();
}
|
Find the normalized ijk coordinates of the hex in the specified digit
direction from the current ijk coordinates.
@param digit The digit direction from the original ijk coordinates.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/CoordIJK.java
| 273
|
[
"digit"
] |
void
| true
| 3
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
parseLocale
|
/**
 * Tries to parse a {@link Locale} from the given String.
 * <p>Accepts a bare ISO-639 language code, language+country, language+country+variant
 * (separated by '_' or '-'), or a bare ISO-3166 country code.</p>
 * @param str the String to parse as a Locale
 * @return a Locale parsed from the given String
 * @throws IllegalArgumentException if the given String cannot be parsed
 */
private static Locale parseLocale(final String str) {
    // A bare language code needs no further splitting.
    if (isISO639LanguageCode(str)) {
        return new Locale(str);
    }
    final int limit = 3;
    // Prefer '_' as the separator when present, otherwise '-'.
    final char separator = str.indexOf(UNDERSCORE) != -1 ? UNDERSCORE : DASH;
    final String[] segments = str.split(String.valueOf(separator), 3);
    final String language = segments[0];
    if (segments.length == 2) {
        final String country = segments[1];
        // NOTE(review): '&&' binds tighter than '||' here, so a numeric area code
        // is accepted regardless of the language segment — confirm this is intended.
        if (isISO639LanguageCode(language) && isISO3166CountryCode(country) || isNumericAreaCode(country)) {
            return new Locale(language, country);
        }
    } else if (segments.length == limit) {
        final String country = segments[1];
        final String variant = segments[2];
        // A variant requires a valid language, an empty or valid country, and a non-empty variant.
        if (isISO639LanguageCode(language) && (country.isEmpty() || isISO3166CountryCode(country) || isNumericAreaCode(country)) && !variant.isEmpty()) {
            return new Locale(language, country, variant);
        }
    }
    // Last resort: a bare country code (language-less locale).
    if (ArrayUtils.contains(Locale.getISOCountries(), str)) {
        return new Locale(StringUtils.EMPTY, str);
    }
    throw new IllegalArgumentException("Invalid locale format: " + str);
}
|
Tries to parse a Locale from the given String.
<p>
See {@link Locale} for the format.
</p>
@param str the String to parse as a Locale.
@return a Locale parsed from the given String.
@throws IllegalArgumentException if the given String cannot be parsed.
@see Locale
|
java
|
src/main/java/org/apache/commons/lang3/LocaleUtils.java
| 330
|
[
"str"
] |
Locale
| true
| 14
| 7.92
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
load_library
|
def load_library(libname, loader_path):
    """
    It is possible to load a library using
    >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
    But there are cross-platform considerations, such as library file extensions,
    plus the fact Windows will just load the first library it finds with that name.
    NumPy supplies the load_library function as a convenience.
    .. versionchanged:: 1.20.0
        Allow libname and loader_path to take any
        :term:`python:path-like object`.
    Parameters
    ----------
    libname : path-like
        Name of the library, which can have 'lib' as a prefix,
        but without an extension.
    loader_path : path-like
        Where the library can be found.
    Returns
    -------
    ctypes.cdll[libpath] : library object
        A ctypes library object
    Raises
    ------
    OSError
        If there is no library with the expected extension, or the
        library is defective and cannot be loaded.
    """
    # Convert path-like objects into strings
    libname = os.fsdecode(libname)
    loader_path = os.fsdecode(loader_path)

    ext = os.path.splitext(libname)[1]
    if not ext:
        import sys
        import sysconfig
        # Try to load library with platform-specific name, otherwise
        # default to libname.[so|dll|dylib]. Sometimes, these files are
        # built erroneously on non-linux platforms.
        base_ext = ".so"
        if sys.platform.startswith("darwin"):
            base_ext = ".dylib"
        elif sys.platform.startswith("win"):
            base_ext = ".dll"
        libname_ext = [libname + base_ext]
        so_ext = sysconfig.get_config_var("EXT_SUFFIX")
        if not so_ext == base_ext:
            libname_ext.insert(0, libname + so_ext)
    else:
        libname_ext = [libname]

    loader_path = os.path.abspath(loader_path)
    if not os.path.isdir(loader_path):
        libdir = os.path.dirname(loader_path)
    else:
        libdir = loader_path

    for ln in libname_ext:
        libpath = os.path.join(libdir, ln)
        if os.path.exists(libpath):
            # ctypes raises OSError itself for a defective library file; let it
            # propagate unmodified. (The previous try/except that immediately
            # re-raised the same OSError was a no-op and has been removed.)
            return ctypes.cdll[libpath]
    # if no successful return in the libname_ext loop:
    raise OSError("no file with expected extension")
|
It is possible to load a library using
>>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
But there are cross-platform considerations, such as library file extensions,
plus the fact Windows will just load the first library it finds with that name.
NumPy supplies the load_library function as a convenience.
.. versionchanged:: 1.20.0
Allow libname and loader_path to take any
:term:`python:path-like object`.
Parameters
----------
libname : path-like
Name of the library, which can have 'lib' as a prefix,
but without an extension.
loader_path : path-like
Where the library can be found.
Returns
-------
ctypes.cdll[libpath] : library object
A ctypes library object
Raises
------
OSError
If there is no library with the expected extension, or the
library is defective and cannot be loaded.
|
python
|
numpy/ctypeslib/_ctypeslib.py
| 94
|
[
"libname",
"loader_path"
] | false
| 10
| 7.6
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
readFirstLine
|
/**
 * Returns the first line of this source, or {@code null} when the source has no lines.
 */
@Override
public @Nullable String readFirstLine() {
    Iterator<String> lineIterator = linesIterator();
    if (!lineIterator.hasNext()) {
        return null;
    }
    return lineIterator.next();
}
|
Returns the first line of this source, or {@code null} if the source contains no lines.
Consistent with BufferedReader/LineReader.readLine(), a trailing newline does not produce
a final empty line.
|
java
|
android/guava/src/com/google/common/io/CharSource.java
| 606
|
[] |
String
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
_upsample
|
def _upsample(self, method, limit: int | None = None, fill_value=None):
    """
    Reindex onto the new (binned) index, filling gaps per ``method``.

    Parameters
    ----------
    method : {'backfill', 'bfill', 'pad', 'ffill'}
        Fill method used when reindexing.
    limit : int, default None
        Maximum size gap to fill when reindexing.
    fill_value : scalar, default None
        Value to use for missing values.
    """
    # NOTE: fill_value is accepted for API symmetry but not used in this body.
    source = self.obj
    target_index = self.binner
    # Align the axis to start vs. end of period before computing the indexer.
    memb = self.ax.asfreq(self.freq, how=self.convention)
    if method == "asfreq":
        # 'asfreq' means plain reindexing with no fill method.
        method = None
    indexer = memb.get_indexer(target_index, method=method, limit=limit)
    reindexed = _take_new_index(
        source,
        indexer,
        target_index,
    )
    return self._wrap_result(reindexed)
|
Parameters
----------
method : {'backfill', 'bfill', 'pad', 'ffill'}
Method for upsampling.
limit : int, default None
Maximum size gap to fill when reindexing.
fill_value : scalar, default None
Value to use for missing values.
|
python
|
pandas/core/resample.py
| 2,241
|
[
"self",
"method",
"limit",
"fill_value"
] | true
| 2
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
|
wrapMessage
|
/**
 * Wraps a message with a status and body, serializing the body when it is a
 * non-primitive payload attached to a non-success status.
 * @param {string} status - The status of the message.
 * @param {any} body - The body of the message.
 * @returns {{status: string, body: any}}
 */
function wrapMessage(status, body) {
  // Success results, null, and primitive bodies are forwarded untouched.
  const isPassThrough =
    status === 'success' ||
    body === null ||
    (typeof body !== 'object' &&
     typeof body !== 'function' &&
     typeof body !== 'symbol');
  if (isPassThrough) {
    return { status, body };
  }
  let serialized;
  let serializationFailed;
  try {
    const { serializeError } = require('internal/error_serdes');
    serialized = serializeError(body);
  } catch {
    // Record the failure rather than throwing across the worker boundary.
    serializationFailed = true;
  }
  return {
    status,
    body: {
      serialized,
      serializationFailed,
    },
  };
}
|
Wraps a message with a status and body, and serializes the body if necessary.
@param {string} status - The status of the message.
@param {any} body - The body of the message.
@returns {{status: string, body: any}}
|
javascript
|
lib/internal/modules/esm/worker.js
| 109
|
[
"status",
"body"
] | false
| 7
| 6.24
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
readLong
|
/**
 * Reads eight bytes and assembles them into a {@code long} using little-endian
 * byte order, mirroring {@link DataInputStream#readLong()}.
 * @return the next eight input bytes as a little-endian {@code long}
 * @throws IOException if an I/O error occurs
 */
@CanIgnoreReturnValue // to skip some bytes
@Override
public long readLong() throws IOException {
    byte[] bytes = new byte[8];
    for (int i = 0; i < bytes.length; i++) {
        bytes[i] = readAndCheckByte();
    }
    // Longs.fromBytes takes big-endian order, so reverse the read order here.
    return Longs.fromBytes(
        bytes[7], bytes[6], bytes[5], bytes[4], bytes[3], bytes[2], bytes[1], bytes[0]);
}
|
Reads a {@code long} as specified by {@link DataInputStream#readLong()}, except using
little-endian byte order.
@return the next eight bytes of the input stream, interpreted as a {@code long} in
little-endian byte order
@throws IOException if an I/O error occurs
|
java
|
android/guava/src/com/google/common/io/LittleEndianDataInputStream.java
| 133
|
[] | true
| 1
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
|
clear
|
/** Removes all mappings: resets both hash tables and drops the key insertion order chain. */
@Override
public void clear() {
    size = 0;
    Arrays.fill(hashTableKToV, null);
    Arrays.fill(hashTableVToK, null);
    firstInKeyInsertionOrder = null;
    lastInKeyInsertionOrder = null;
    // Structural modification: invalidate any live iterators.
    modCount++;
}
|
Removes all of the mappings from this bimap.
<p>Clears both the key-to-value and value-to-key hash tables, resets the key
insertion order chain, and increments the modification count so that any live
iterators fail fast.
|
java
|
guava/src/com/google/common/collect/HashBiMap.java
| 429
|
[] |
void
| true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
assertCanOverwrite
|
/**
 * Assert that the given file may be overwritten by the current process.
 * @param file the file to check
 * @throws FileNotFoundException if the file is not writable
 * @throws IOException if the permission check itself fails
 */
private void assertCanOverwrite(File file) throws IOException {
    boolean writable = file.canWrite() && canWritePosixFile(file);
    if (!writable) {
        throw new FileNotFoundException(file + " (permission denied)");
    }
}
|
Assert that the specified file can be overwritten by the current process.
@param file the file to check
@throws FileNotFoundException if the file permissions do not permit writing
@throws IOException if the permission check fails
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationPid.java
| 125
|
[
"file"
] |
void
| true
| 3
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
get_default
|
def get_default():
    """
    Return the engine of the first configured DjangoTemplates backend, raising
    ImproperlyConfigured when no such backend exists.

    Kept for historical APIs that rely on a globally available, implicitly
    configured engine such as:
    >>> from django.template import Context, Template
    >>> template = Template("Hello {{ name }}!")
    >>> context = Context({'name': "world"})
    >>> template.render(context)
    'Hello world!'
    """
    # Local imports are required to avoid import loops: Engine is imported in
    # django.template, and DjangoTemplates is a wrapper around Engine.
    from django.template import engines
    from django.template.backends.django import DjangoTemplates

    for backend in engines.all():
        if not isinstance(backend, DjangoTemplates):
            continue
        return backend.engine
    raise ImproperlyConfigured("No DjangoTemplates backend is configured.")
|
Return the first DjangoTemplates backend that's configured, or raise
ImproperlyConfigured if none are configured.
This is required for preserving historical APIs that rely on a
globally available, implicitly configured engine such as:
>>> from django.template import Context, Template
>>> template = Template("Hello {{ name }}!")
>>> context = Context({'name': "world"})
>>> template.render(context)
'Hello world!'
|
python
|
django/template/engine.py
| 89
|
[] | false
| 3
| 7.2
|
django/django
| 86,204
|
unknown
| false
|
|
parseAdvice
|
/**
 * Parses one of '{@code before}', '{@code after}', '{@code after-returning}',
 * '{@code after-throwing}' or '{@code around}' and registers the resulting
 * BeanDefinition with the supplied registry (via the reader context).
 * @return the generated advice RootBeanDefinition
 */
private AbstractBeanDefinition parseAdvice(
        String aspectName, int order, Element aspectElement, Element adviceElement, ParserContext parserContext,
        List<BeanDefinition> beanDefinitions, List<BeanReference> beanReferences) {
    try {
        // The try/finally keeps the parse-state stack balanced even when parsing fails.
        this.parseState.push(new AdviceEntry(parserContext.getDelegate().getLocalName(adviceElement)));
        // create the method factory bean
        RootBeanDefinition methodDefinition = new RootBeanDefinition(MethodLocatingFactoryBean.class);
        methodDefinition.getPropertyValues().add("targetBeanName", aspectName);
        methodDefinition.getPropertyValues().add("methodName", adviceElement.getAttribute("method"));
        methodDefinition.setSynthetic(true);
        // create instance factory definition
        RootBeanDefinition aspectFactoryDef =
                new RootBeanDefinition(SimpleBeanFactoryAwareAspectInstanceFactory.class);
        aspectFactoryDef.getPropertyValues().add("aspectBeanName", aspectName);
        aspectFactoryDef.setSynthetic(true);
        // register the pointcut
        AbstractBeanDefinition adviceDef = createAdviceDefinition(
                adviceElement, parserContext, aspectName, order, methodDefinition, aspectFactoryDef,
                beanDefinitions, beanReferences);
        // configure the advisor
        RootBeanDefinition advisorDefinition = new RootBeanDefinition(AspectJPointcutAdvisor.class);
        advisorDefinition.setSource(parserContext.extractSource(adviceElement));
        advisorDefinition.getConstructorArgumentValues().addGenericArgumentValue(adviceDef);
        // Propagate an explicit 'order' attribute from the enclosing aspect element.
        if (aspectElement.hasAttribute(ORDER_PROPERTY)) {
            advisorDefinition.getPropertyValues().add(
                    ORDER_PROPERTY, aspectElement.getAttribute(ORDER_PROPERTY));
        }
        // register the final advisor
        parserContext.getReaderContext().registerWithGeneratedName(advisorDefinition);
        return advisorDefinition;
    }
    finally {
        this.parseState.pop();
    }
}
|
Parses one of '{@code before}', '{@code after}', '{@code after-returning}',
'{@code after-throwing}' or '{@code around}' and registers the resulting
BeanDefinition with the supplied BeanDefinitionRegistry.
@return the generated advice RootBeanDefinition
|
java
|
spring-aop/src/main/java/org/springframework/aop/config/ConfigBeanDefinitionParser.java
| 309
|
[
"aspectName",
"order",
"aspectElement",
"adviceElement",
"parserContext",
"beanDefinitions",
"beanReferences"
] |
AbstractBeanDefinition
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
addAndGet
|
/**
 * Adds {@code operand} to this mutable value and returns the updated result.
 * Not thread safe.
 * @param operand the quantity to add, not null.
 * @return the value held by this instance after the addition.
 * @since 3.5
 */
public double addAndGet(final double operand) {
    value += operand;
    return this.value;
}
|
Increments this instance's value by {@code operand}; this method returns the value associated with the instance
immediately after the addition operation. This method is not thread safe.
@param operand the quantity to add, not null.
@return the value associated with this instance after adding the operand.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableDouble.java
| 113
|
[
"operand"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
interleaved_dtype
|
def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None:
    """
    Determine a single dtype common to all of ``dtypes``.

    Parameters
    ----------
    dtypes : List[DtypeObj]

    Returns
    -------
    dtype : np.dtype, ExtensionDtype, or None
        ``None`` when ``dtypes`` is empty.
    """
    if len(dtypes) == 0:
        return None
    return find_common_type(dtypes)
|
Find the common dtype for `blocks`.
Parameters
----------
blocks : List[DtypeObj]
Returns
-------
dtype : np.dtype, ExtensionDtype, or None
None is returned when `blocks` is empty.
|
python
|
pandas/core/internals/managers.py
| 111
|
[
"dtypes"
] |
DtypeObj | None
| true
| 2
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
withAnnotations
|
/**
 * Create an updated {@link Bindable} instance with the specified annotations.
 * @param annotations the annotations to associate, or {@code null} for none
 * @return an updated {@link Bindable}
 */
public Bindable<T> withAnnotations(Annotation @Nullable ... annotations) {
    Annotation[] resolvedAnnotations = (annotations != null) ? annotations : NO_ANNOTATIONS;
    return new Bindable<>(this.type, this.boxedType, this.value, resolvedAnnotations,
            NO_BIND_RESTRICTIONS, this.bindMethod);
}
|
Create an updated {@link Bindable} instance with the specified annotations.
@param annotations the annotations
@return an updated {@link Bindable}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
| 183
|
[] | true
| 2
| 7.52
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
getExternallyManagedInitMethods
|
/**
 * Get all externally managed initialization methods (as an immutable Set).
 * <p>See {@link #registerExternallyManagedInitMethod} for details
 * regarding the format of the method names in the returned set.
 * @since 5.3.11
 */
public Set<String> getExternallyManagedInitMethods() {
    synchronized (this.postProcessingLock) {
        if (this.externallyManagedInitMethods == null) {
            return Collections.emptySet();
        }
        // Defensive copy so callers never observe post-processing mutations.
        return Collections.unmodifiableSet(new LinkedHashSet<>(this.externallyManagedInitMethods));
    }
}
|
Get all externally managed initialization methods (as an immutable Set).
<p>See {@link #registerExternallyManagedInitMethod} for details
regarding the format for the initialization methods in the returned set.
@since 5.3.11
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/RootBeanDefinition.java
| 548
|
[] | true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
getRouterInstance
|
/**
 * Looks up the application's Router instance (if available) by walking from the
 * root node of the indexed directive forest to its injector.
 * @returns the Router instance, or null when no root element or injector exists.
 */
function getRouterInstance() {
  const indexedForest = initializeOrGetDirectiveForestHooks().getIndexedDirectiveForest();
  const root = indexedForest[0];
  if (!root?.nativeElement) {
    return null;
  }
  const rootInjector = getInjectorFromElementNode(root.nativeElement);
  if (!rootInjector) {
    return null;
  }
  const ng = ngDebugClient();
  // ɵgetRouterInstance is an optional debug hook; undefined when unsupported.
  return (ng as any).ɵgetRouterInstance?.(rootInjector);
}
|
Opens the source code of a component or a directive in the editor.
@param constructName - The name of the class/function that represents a component, provider, guard
or other callable to view source for.
@param type - The type of the element to view source for component, provider, or directive.
@returns - The element instance of the component, provider, or directive.
|
typescript
|
devtools/projects/ng-devtools-backend/src/lib/client-event-subscribers.ts
| 398
|
[] | false
| 4
| 7.12
|
angular/angular
| 99,544
|
jsdoc
| false
|
|
ttl
|
public Long ttl() {
return ttl;
}
|
Returns the TTL (in milliseconds).
@return ttl the time-to-live (in milliseconds) of the data, or null if there is no TTL
|
java
|
clients/src/main/java/org/apache/kafka/common/config/ConfigData.java
| 65
|
[] |
Long
| true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
getReferences
|
private Set<StandardConfigDataReference> getReferences(ConfigDataLocation configDataLocation,
String resourceLocation, String profile) {
if (isDirectory(resourceLocation)) {
return getReferencesForDirectory(configDataLocation, resourceLocation, profile);
}
return getReferencesForFile(configDataLocation, resourceLocation, profile);
}
|
Create a new {@link StandardConfigDataLocationResolver} instance.
@param logFactory the factory for loggers to use
@param binder a binder backed by the initial {@link Environment}
@param resourceLoader a {@link ResourceLoader} used to load resources
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/StandardConfigDataLocationResolver.java
| 181
|
[
"configDataLocation",
"resourceLocation",
"profile"
] | true
| 2
| 6.08
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
list_transform_jobs
|
def list_transform_jobs(
self, name_contains: str | None = None, max_results: int | None = None, **kwargs
) -> list[dict]:
"""
Call boto3's ``list_transform_jobs``.
The transform job name and max results are configurable via arguments.
Other arguments are not, and should be provided via kwargs. Note that
boto3 expects these in CamelCase, for example:
.. code-block:: python
list_transform_jobs(name_contains="myjob", StatusEquals="Failed")
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.list_transform_jobs`
:param name_contains: (optional) partial name to match.
:param max_results: (optional) maximum number of results to return.
None returns infinite results.
:param kwargs: (optional) kwargs to boto3's list_transform_jobs method.
:return: results of the list_transform_jobs request.
"""
config, max_results = self._preprocess_list_request_args(name_contains, max_results, **kwargs)
list_transform_jobs_request = partial(self.get_conn().list_transform_jobs, **config)
results = self._list_request(
list_transform_jobs_request, "TransformJobSummaries", max_results=max_results
)
return results
|
Call boto3's ``list_transform_jobs``.
The transform job name and max results are configurable via arguments.
Other arguments are not, and should be provided via kwargs. Note that
boto3 expects these in CamelCase, for example:
.. code-block:: python
list_transform_jobs(name_contains="myjob", StatusEquals="Failed")
.. seealso::
- :external+boto3:py:meth:`SageMaker.Client.list_transform_jobs`
:param name_contains: (optional) partial name to match.
:param max_results: (optional) maximum number of results to return.
None returns infinite results.
:param kwargs: (optional) kwargs to boto3's list_transform_jobs method.
:return: results of the list_transform_jobs request.
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/sagemaker.py
| 888
|
[
"self",
"name_contains",
"max_results"
] |
list[dict]
| true
| 1
| 6.24
|
apache/airflow
| 43,597
|
sphinx
| false
|
substituteHelperName
|
function substituteHelperName(node: Identifier): Expression {
const externalHelpersModuleName = currentSourceFile && getExternalHelpersModuleName(currentSourceFile);
if (externalHelpersModuleName) {
noSubstitution.add(getNodeId(node));
return factory.createPropertyAccessExpression(externalHelpersModuleName, node);
}
if (helperNameSubstitutions) {
const name = idText(node);
let substitution = helperNameSubstitutions.get(name);
if (!substitution) {
helperNameSubstitutions.set(name, substitution = factory.createUniqueName(name, GeneratedIdentifierFlags.Optimistic | GeneratedIdentifierFlags.FileLevel));
}
return substitution;
}
return node;
}
|
Hooks node substitutions.
@param hint A hint as to the intended usage of the node.
@param node The node to substitute.
|
typescript
|
src/compiler/transformers/module/esnextAnd2015.ts
| 423
|
[
"node"
] | true
| 5
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
_preload_cuda_lib
|
def _preload_cuda_lib(lib_folder: str, lib_name: str, required: bool = True) -> None: # type: ignore[valid-type]
"""Preloads cuda library if it could not be found otherwise."""
# Should only be called on Linux if default path resolution have failed
if platform.system() != "Linux":
raise AssertionError(f"Should only be called on Linux, got {platform.system()}")
lib_path = None
for path in sys.path:
candidate_lib_paths = _get_cuda_dep_paths(path, lib_folder, lib_name)
if candidate_lib_paths:
lib_path = candidate_lib_paths[0]
break
if not lib_path and required:
raise ValueError(f"{lib_name} not found in the system path {sys.path}")
if lib_path:
ctypes.CDLL(lib_path)
|
Preloads cuda library if it could not be found otherwise.
|
python
|
torch/__init__.py
| 305
|
[
"lib_folder",
"lib_name",
"required"
] |
None
| true
| 7
| 6
|
pytorch/pytorch
| 96,034
|
unknown
| false
|
findImmediatelyPrecedingTokenOfKind
|
function findImmediatelyPrecedingTokenOfKind(end: number, expectedTokenKind: SyntaxKind, sourceFile: SourceFile): Node | undefined {
const precedingToken = findPrecedingToken(end, sourceFile);
return precedingToken && precedingToken.kind === expectedTokenKind && end === precedingToken.getEnd() ?
precedingToken :
undefined;
}
|
Validating `expectedTokenKind` ensures the token was typed in the context we expect (eg: not a comment).
@param expectedTokenKind The kind of the last token constituting the desired parent node.
|
typescript
|
src/services/formatting/formatting.ts
| 253
|
[
"end",
"expectedTokenKind",
"sourceFile"
] | true
| 4
| 6.56
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
tan
|
public static double tan(double angle) {
if (Math.abs(angle) > TAN_MAX_VALUE_FOR_INT_MODULO) {
// Faster than using normalizeMinusHalfPiHalfPi.
angle = remainderTwoPi(angle);
if (angle < -M_HALF_PI) {
angle += Math.PI;
} else if (angle > M_HALF_PI) {
angle -= Math.PI;
}
}
boolean negateResult;
if (angle < 0.0) {
angle = -angle;
negateResult = true;
} else {
negateResult = false;
}
int index = (int) (angle * TAN_INDEXER + 0.5);
double delta = (angle - index * TAN_DELTA_HI) - index * TAN_DELTA_LO;
// index modulo PI, i.e. 2*(virtual tab size minus one).
index &= (2 * (TAN_VIRTUAL_TABS_SIZE - 1) - 1); // index % (2*(TAN_VIRTUAL_TABS_SIZE-1))
// Here, index is in [0,2*(TAN_VIRTUAL_TABS_SIZE-1)-1], i.e. indicates an angle in [0,PI[.
if (index > (TAN_VIRTUAL_TABS_SIZE - 1)) {
index = (2 * (TAN_VIRTUAL_TABS_SIZE - 1)) - index;
delta = -delta;
negateResult = negateResult == false;
}
double result;
if (index < TAN_TABS_SIZE) {
result = tanTab[index] + delta * (tanDer1DivF1Tab[index] + delta * (tanDer2DivF2Tab[index] + delta * (tanDer3DivF3Tab[index]
+ delta * tanDer4DivF4Tab[index])));
} else { // angle in ]TAN_MAX_VALUE_FOR_TABS,TAN_MAX_VALUE_FOR_INT_MODULO], or angle is NaN
// Using tan(angle) == 1/tan(PI/2-angle) formula: changing angle (index and delta), and inverting.
index = (TAN_VIRTUAL_TABS_SIZE - 1) - index;
result = 1 / (tanTab[index] - delta * (tanDer1DivF1Tab[index] - delta * (tanDer2DivF2Tab[index] - delta
* (tanDer3DivF3Tab[index] - delta * tanDer4DivF4Tab[index]))));
}
return negateResult ? -result : result;
}
|
@param angle Angle in radians.
@return Angle tangent.
|
java
|
libs/h3/src/main/java/org/elasticsearch/h3/FastMath.java
| 395
|
[
"angle"
] | true
| 8
| 8.24
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
max
|
public static <A extends Comparable<A>> A max(final A comparable1, final A comparable2) {
return ObjectUtils.compare(comparable1, comparable2, false) > 0 ? comparable1 : comparable2;
}
|
Returns the greater of two {@link Comparable} values, ignoring null.
<p>
For three or more values, use {@link ObjectUtils#max(Comparable...)}.
</p>
@param <A> Type of what we are comparing.
@param comparable1 the first comparable, may be null.
@param comparable2 the second comparable, may be null.
@return the largest of {@code comparable1} and {@code comparable2}.
@see ObjectUtils#max(Comparable...)
@since 3.13.0
|
java
|
src/main/java/org/apache/commons/lang3/compare/ComparableUtils.java
| 222
|
[
"comparable1",
"comparable2"
] |
A
| true
| 2
| 7.52
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
lastIndexOf
|
public static int lastIndexOf(float[] array, float target) {
return lastIndexOf(array, target, 0, array.length);
}
|
Returns the index of the last appearance of the value {@code target} in {@code array}. Note
that this always returns {@code -1} when {@code target} is {@code NaN}.
@param array an array of {@code float} values, possibly empty
@param target a primitive {@code float} value
@return the greatest index {@code i} for which {@code array[i] == target}, or {@code -1} if no
such index exists.
|
java
|
android/guava/src/com/google/common/primitives/Floats.java
| 183
|
[
"array",
"target"
] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
resolveMainPath
|
function resolveMainPath(main) {
/** @type {string} */
let mainPath;
// Extension searching for the main entry point is supported for backward compatibility.
// Module._findPath is monkey-patchable here.
const { Module } = require('internal/modules/cjs/loader');
mainPath = Module._findPath(path.resolve(main), null, true);
if (!mainPath) { return; }
const preserveSymlinksMain = getOptionValue('--preserve-symlinks-main');
if (!preserveSymlinksMain) {
const { toRealPath } = require('internal/modules/helpers');
mainPath = toRealPath(mainPath);
}
return mainPath;
}
|
Get the absolute path to the main entry point.
@param {string} main - Entry point path
@returns {string|undefined}
|
javascript
|
lib/internal/modules/run_main.js
| 29
|
[
"main"
] | false
| 3
| 6.4
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
addMainAndStartAttributes
|
private void addMainAndStartAttributes(JarFile source, Manifest manifest) throws IOException {
String mainClass = getMainClass(source, manifest);
String launcherClass = getLayout().getLauncherClassName();
if (launcherClass != null) {
Assert.state(mainClass != null, "Unable to find main class");
manifest.getMainAttributes().putValue(MAIN_CLASS_ATTRIBUTE, launcherClass);
manifest.getMainAttributes().putValue(START_CLASS_ATTRIBUTE, mainClass);
}
else if (mainClass != null) {
manifest.getMainAttributes().putValue(MAIN_CLASS_ATTRIBUTE, mainClass);
}
}
|
Writes a signature file if necessary for the given {@code writtenLibraries}.
@param writtenLibraries the libraries
@param writer the writer to use to write the signature file if necessary
@throws IOException if a failure occurs when writing the signature file
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 319
|
[
"source",
"manifest"
] |
void
| true
| 3
| 6.56
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
remove_repeating
|
def remove_repeating(substr: str, s: str) -> str:
"""Remove repeating module names from string.
Arguments:
task_name (str): Task name (full path including module),
to use as the basis for removing module names.
s (str): The string we want to work on.
Example:
>>> _shorten_names(
... 'x.tasks.add',
... 'x.tasks.add(2, 2) | x.tasks.add(4) | x.tasks.mul(8)',
... )
'x.tasks.add(2, 2) | add(4) | mul(8)'
"""
# find the first occurrence of substr in the string.
index = s.find(substr)
if index >= 0:
return ''.join([
# leave the first occurrence of substr untouched.
s[:index + len(substr)],
# strip seen substr from the rest of the string.
s[index + len(substr):].replace(substr, ''),
])
return s
|
Remove repeating module names from string.
Arguments:
task_name (str): Task name (full path including module),
to use as the basis for removing module names.
s (str): The string we want to work on.
Example:
>>> _shorten_names(
... 'x.tasks.add',
... 'x.tasks.add(2, 2) | x.tasks.add(4) | x.tasks.mul(8)',
... )
'x.tasks.add(2, 2) | add(4) | mul(8)'
|
python
|
celery/utils/text.py
| 156
|
[
"substr",
"s"
] |
str
| true
| 2
| 8.64
|
celery/celery
| 27,741
|
google
| false
|
getCaches
|
protected Collection<? extends Cache> getCaches(
CacheOperationInvocationContext<CacheOperation> context, CacheResolver cacheResolver) {
Collection<? extends Cache> caches = cacheResolver.resolveCaches(context);
if (caches.isEmpty()) {
throw new IllegalStateException("No cache could be resolved for '" +
context.getOperation() + "' using resolver '" + cacheResolver +
"'. At least one cache should be provided per cache operation.");
}
return caches;
}
|
Convenience method to return a String representation of this Method
for use in logging. Can be overridden in subclasses to provide a
different identifier for the given method.
@param method the method we're interested in
@param targetClass class the method is on
@return log message identifying this method
@see org.springframework.util.ClassUtils#getQualifiedMethodName
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
| 309
|
[
"context",
"cacheResolver"
] | true
| 2
| 7.44
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
rm
|
public static void rm(final Path... locations) throws IOException {
final LinkedHashMap<Path, Throwable> unremoved = rm(new LinkedHashMap<>(), locations);
if (unremoved.isEmpty() == false) {
final StringBuilder b = new StringBuilder("could not remove the following files (in the order of attempts):\n");
for (final Map.Entry<Path, Throwable> kv : unremoved.entrySet()) {
b.append(" ").append(kv.getKey().toAbsolutePath()).append(": ").append(kv.getValue()).append("\n");
}
throw new IOException(b.toString());
}
}
|
Deletes one or more files or directories (and everything underneath it).
@throws IOException if any of the given files (or their sub-hierarchy files in case of directories) cannot be removed.
|
java
|
libs/core/src/main/java/org/elasticsearch/core/IOUtils.java
| 196
|
[] |
void
| true
| 2
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
toProtocolPerformanceData
|
function toProtocolPerformanceData(performanceData: PerformanceData): protocol.PerformanceData {
const diagnosticsDuration = performanceData.diagnosticsDuration &&
arrayFrom(performanceData.diagnosticsDuration, ([file, data]) => ({ ...data, file }));
return { ...performanceData, diagnosticsDuration };
}
|
@param projects Projects initially known to contain {@link initialLocation}
@param defaultProject The default project containing {@link initialLocation}
@param initialLocation Where the search operation was triggered
@param getResultsForPosition This is where you plug in `findReferences`, `renameLocation`, etc
@param forPositionInResult Given an item returned by {@link getResultsForPosition} enumerate the positions referred to by that result
@returns In the common case where there's only one project, returns an array of results from {@link getResultsForPosition}.
If multiple projects were searched - even if they didn't return results - the result will be a map from project to per-project results.
|
typescript
|
src/server/session.ts
| 3,977
|
[
"performanceData"
] | true
| 2
| 7.12
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
get_dist_package_name_prefix
|
def get_dist_package_name_prefix(provider_id: str) -> str:
"""
Returns Wheel package name prefix for the package id.
:param provider_id: id of the package
:return: the name of wheel package prefix
"""
return "apache_airflow_providers_" + provider_id.replace(".", "_")
|
Returns Wheel package name prefix for the package id.
:param provider_id: id of the package
:return: the name of wheel package prefix
|
python
|
dev/breeze/src/airflow_breeze/utils/packages.py
| 467
|
[
"provider_id"
] |
str
| true
| 1
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
translateInputToOutputRanges
|
static DebugAddressRangesVector
translateInputToOutputRanges(const BinaryFunction &BF,
const DWARFAddressRangesVector &InputRanges) {
DebugAddressRangesVector OutputRanges;
// If the function hasn't changed return the same ranges.
if (!BF.isEmitted()) {
OutputRanges.resize(InputRanges.size());
llvm::transform(InputRanges, OutputRanges.begin(),
[](const DWARFAddressRange &Range) {
return DebugAddressRange(Range.LowPC, Range.HighPC);
});
return OutputRanges;
}
for (const DWARFAddressRange &Range : InputRanges)
llvm::append_range(OutputRanges, BF.translateInputToOutputRange(
{Range.LowPC, Range.HighPC}));
// Post-processing pass to sort and merge ranges.
llvm::sort(OutputRanges);
DebugAddressRangesVector MergedRanges;
uint64_t PrevHighPC = 0;
for (const DebugAddressRange &Range : OutputRanges) {
if (Range.LowPC <= PrevHighPC) {
MergedRanges.back().HighPC =
std::max(MergedRanges.back().HighPC, Range.HighPC);
} else {
MergedRanges.emplace_back(Range.LowPC, Range.HighPC);
}
PrevHighPC = MergedRanges.back().HighPC;
}
return MergedRanges;
}
|
translate them to a set of address ranges in the output binary.
|
cpp
|
bolt/lib/Rewrite/DWARFRewriter.cpp
| 96
|
[] | true
| 4
| 7.2
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
checkElementIndex
|
@CanIgnoreReturnValue
public static int checkElementIndex(int index, int size, String desc) {
// Carefully optimized for execution by hotspot (explanatory comment above)
if (index < 0 || index >= size) {
throw new IndexOutOfBoundsException(badElementIndex(index, size, desc));
}
return index;
}
|
Ensures that {@code index} specifies a valid <i>element</i> in an array, list or string of size
{@code size}. An element index may range from zero, inclusive, to {@code size}, exclusive.
@param index a user-supplied index identifying an element of an array, list or string
@param size the size of that array, list or string
@param desc the text to use to describe this index in an error message
@return the value of {@code index}
@throws IndexOutOfBoundsException if {@code index} is negative or is not less than {@code size}
@throws IllegalArgumentException if {@code size} is negative
|
java
|
android/guava/src/com/google/common/base/Preconditions.java
| 1,365
|
[
"index",
"size",
"desc"
] | true
| 3
| 7.92
|
google/guava
| 51,352
|
javadoc
| false
|
|
send
|
def send(self, sender, **named):
"""Send signal from sender to all connected receivers.
If any receiver raises an error, the exception is returned as the
corresponding response. (This is different from the "send" in
Django signals. In Celery "send" and "send_robust" do the same thing.)
Arguments:
sender (Any): The sender of the signal.
Either a specific object or :const:`None`.
**named (Any): Named arguments which will be passed to receivers.
Returns:
List: of tuple pairs: `[(receiver, response), … ]`.
"""
responses = []
if not self.receivers or \
self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as exc: # pylint: disable=broad-except
if not hasattr(exc, '__traceback__'):
exc.__traceback__ = sys.exc_info()[2]
logger.exception(
'Signal handler %r raised: %r', receiver, exc)
responses.append((receiver, exc))
else:
responses.append((receiver, response))
return responses
|
Send signal from sender to all connected receivers.
If any receiver raises an error, the exception is returned as the
corresponding response. (This is different from the "send" in
Django signals. In Celery "send" and "send_robust" do the same thing.)
Arguments:
sender (Any): The sender of the signal.
Either a specific object or :const:`None`.
**named (Any): Named arguments which will be passed to receivers.
Returns:
List: of tuple pairs: `[(receiver, response), … ]`.
|
python
|
celery/utils/dispatch/signal.py
| 258
|
[
"self",
"sender"
] | false
| 6
| 7.12
|
celery/celery
| 27,741
|
google
| false
|
|
hasPendingRequests
|
public boolean hasPendingRequests(Node node) {
if (unsent.hasRequests(node))
return true;
lock.lock();
try {
return client.hasInFlightRequests(node.idString());
} finally {
lock.unlock();
}
}
|
Check whether there is pending request to the given node. This includes both request that
have been transmitted (i.e. in-flight requests) and those which are awaiting transmission.
@param node The node in question
@return A boolean indicating whether there is pending request
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
| 381
|
[
"node"
] | true
| 2
| 8.24
|
apache/kafka
| 31,560
|
javadoc
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.