| function_name (string, 1-57 chars) | function_code (string, 20-4.99k chars) | documentation (string, 50-2k chars) | language (string, 5 classes) | file_path (string, 8-166 chars) | line_number (int32, 4-16.7k) | parameters (list, 0-20 items) | return_type (string, 0-131 chars) | has_type_hints (bool, 2 classes) | complexity (int32, 1-51) | quality_score (float32, 6-9.68) | repo_name (string, 34 classes) | repo_stars (int32, 2.9k-242k) | docstring_style (string, 7 classes) | is_async (bool, 2 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
resolveAndSet
|
public void resolveAndSet(RegisteredBean registeredBean, Object instance) {
Assert.notNull(registeredBean, "'registeredBean' must not be null");
Assert.notNull(instance, "'instance' must not be null");
Field field = getField(registeredBean);
Object resolved = resolveValue(registeredBean, field);
if (resolved != null) {
ReflectionUtils.makeAccessible(field);
ReflectionUtils.setField(field, instance, resolved);
}
}
|
Resolve the field value for the specified registered bean and set it
using reflection.
@param registeredBean the registered bean
@param instance the bean instance
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredFieldValueResolver.java
| 158
|
[
"registeredBean",
"instance"
] |
void
| true
| 2
| 6.56
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
metadata
|
protected abstract JoinGroupRequestData.JoinGroupRequestProtocolCollection metadata();
|
Get the current list of protocols and their associated metadata supported
by the local member. The order of the protocols in the list indicates the preference
of the protocol (the first entry is the most preferred). The coordinator takes this
preference into account when selecting the generation protocol (generally more preferred
protocols will be selected as long as all members support them and there is no disagreement
on the preference).
@return Non-empty map of supported protocols and metadata
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java
| 215
|
[] | true
| 1
| 6.48
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
cellSpliterator
|
@Override
final Spliterator<Cell<R, C, V>> cellSpliterator() {
throw new AssertionError("should never be called");
}
|
A builder for creating immutable table instances, especially {@code public static final} tables
("constant tables"). Example:
{@snippet :
static final ImmutableTable<Integer, Character, String> SPREADSHEET =
new ImmutableTable.Builder<Integer, Character, String>()
.put(1, 'A', "foo")
.put(1, 'B', "bar")
.put(2, 'A', "baz")
.buildOrThrow();
}
<p>By default, the order in which cells are added to the builder determines the iteration
ordering of all views in the returned table, with {@link #putAll} following the {@link
Table#cellSet()} iteration order. However, if {@link #orderRowsBy} or {@link #orderColumnsBy}
is called, the views are sorted by the supplied comparators.
<p>For empty or single-cell immutable tables, {@link #of()} and {@link #of(Object, Object,
Object)} are even more convenient.
<p>Builder instances can be reused - it is safe to call {@link #buildOrThrow} multiple times to
build multiple tables in series. Each table is a superset of the tables created before it.
@since 11.0
|
java
|
guava/src/com/google/common/collect/ImmutableTable.java
| 313
|
[] | true
| 1
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
|
_join
|
def _join(sep, seq):
"""
Return a string which is the concatenation of the strings in the
sequence `seq`.
Calls :meth:`str.join` element-wise.
Parameters
----------
sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
seq : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.join
Examples
--------
>>> import numpy as np
>>> np.strings.join('-', 'osd') # doctest: +SKIP
array('o-s-d', dtype='<U5') # doctest: +SKIP
>>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP
array(['g-h-c', 'o.s.d'], dtype='<U5') # doctest: +SKIP
"""
return _to_bytes_or_str_array(
_vec_string(sep, np.object_, 'join', (seq,)), seq)
|
Return a string which is the concatenation of the strings in the
sequence `seq`.
Calls :meth:`str.join` element-wise.
Parameters
----------
sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
seq : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
Returns
-------
out : ndarray
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
depending on input types
See Also
--------
str.join
Examples
--------
>>> import numpy as np
>>> np.strings.join('-', 'osd') # doctest: +SKIP
array('o-s-d', dtype='<U5') # doctest: +SKIP
>>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP
array(['g-h-c', 'o.s.d'], dtype='<U5') # doctest: +SKIP
|
python
|
numpy/_core/strings.py
| 1,359
|
[
"sep",
"seq"
] | false
| 1
| 6.16
|
numpy/numpy
| 31,054
|
numpy
| false
|
|
drop
|
def drop( # type: ignore[override]
self,
codes,
level: Index | np.ndarray | Iterable[Hashable] | None = None,
errors: IgnoreRaise = "raise",
) -> MultiIndex:
"""
Make a new :class:`pandas.MultiIndex` with the passed list of codes deleted.
This method allows for the removal of specified labels from a MultiIndex.
The labels to be removed can be provided as a list of tuples if no level
is specified, or as a list of labels from a specific level if the level
parameter is provided. This can be useful for refining the structure of a
MultiIndex to fit specific requirements.
Parameters
----------
codes : array-like
Must be a list of tuples when ``level`` is not specified.
level : int or level name, default None
Level from which the labels will be dropped.
errors : str, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
MultiIndex
A new MultiIndex with the specified labels removed.
See Also
--------
MultiIndex.remove_unused_levels : Create new MultiIndex from current that
removes unused levels.
MultiIndex.reorder_levels : Rearrange levels using input order.
MultiIndex.rename : Rename levels in a MultiIndex.
Examples
--------
>>> idx = pd.MultiIndex.from_product(
... [(0, 1, 2), ("green", "purple")], names=["number", "color"]
... )
>>> idx
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
>>> idx.drop([(1, "green"), (2, "purple")])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'purple'),
(2, 'green')],
names=['number', 'color'])
We can also drop from a specific level.
>>> idx.drop("green", level="color")
MultiIndex([(0, 'purple'),
(1, 'purple'),
(2, 'purple')],
names=['number', 'color'])
>>> idx.drop([1, 2], level=0)
MultiIndex([(0, 'green'),
(0, 'purple')],
names=['number', 'color'])
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=np.dtype("object"))
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
step = loc.step if loc.step is not None else 1
inds.extend(range(loc.start, loc.stop, step))
elif com.is_bool_indexer(loc):
if get_option("performance_warnings") and self._lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=find_stack_level(),
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
|
Make a new :class:`pandas.MultiIndex` with the passed list of codes deleted.
This method allows for the removal of specified labels from a MultiIndex.
The labels to be removed can be provided as a list of tuples if no level
is specified, or as a list of labels from a specific level if the level
parameter is provided. This can be useful for refining the structure of a
MultiIndex to fit specific requirements.
Parameters
----------
codes : array-like
Must be a list of tuples when ``level`` is not specified.
level : int or level name, default None
Level from which the labels will be dropped.
errors : str, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
MultiIndex
A new MultiIndex with the specified labels removed.
See Also
--------
MultiIndex.remove_unused_levels : Create new MultiIndex from current that
removes unused levels.
MultiIndex.reorder_levels : Rearrange levels using input order.
MultiIndex.rename : Rename levels in a MultiIndex.
Examples
--------
>>> idx = pd.MultiIndex.from_product(
... [(0, 1, 2), ("green", "purple")], names=["number", "color"]
... )
>>> idx
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
>>> idx.drop([(1, "green"), (2, "purple")])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'purple'),
(2, 'green')],
names=['number', 'color'])
We can also drop from a specific level.
>>> idx.drop("green", level="color")
MultiIndex([(0, 'purple'),
(1, 'purple'),
(2, 'purple')],
names=['number', 'color'])
>>> idx.drop([1, 2], level=0)
MultiIndex([(0, 'green'),
(0, 'purple')],
names=['number', 'color'])
|
python
|
pandas/core/indexes/multi.py
| 2,574
|
[
"self",
"codes",
"level",
"errors"
] |
MultiIndex
| true
| 12
| 8.08
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
getFormatter
|
private DateTimeFormatter getFormatter(Type type) {
DateTimeFormatter formatter = this.formatters.get(type);
if (formatter != null) {
return formatter;
}
DateTimeFormatter fallbackFormatter = getFallbackFormatter(type);
return this.factories.get(type).createDateTimeFormatter(fallbackFormatter);
}
|
Set the formatter that will be used for objects representing date and time values.
<p>This formatter will be used for {@link LocalDateTime}, {@link ZonedDateTime},
and {@link OffsetDateTime} types. When specified, the
{@link #setDateTimeStyle dateTimeStyle} and
{@link #setUseIsoFormat useIsoFormat} properties will be ignored.
@param formatter the formatter to use
@see #setDateFormatter
@see #setTimeFormatter
|
java
|
spring-context/src/main/java/org/springframework/format/datetime/standard/DateTimeFormatterRegistrar.java
| 204
|
[
"type"
] |
DateTimeFormatter
| true
| 2
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getIsatapIPv4Address
|
public static Inet4Address getIsatapIPv4Address(Inet6Address ip) {
checkArgument(isIsatapAddress(ip), "Address '%s' is not an ISATAP address.", toAddrString(ip));
return getInet4Address(Arrays.copyOfRange(ip.getAddress(), 12, 16));
}
|
Returns the IPv4 address embedded in an ISATAP address.
@param ip {@link Inet6Address} to be examined for embedded IPv4 in ISATAP address
@return {@link Inet4Address} of embedded IPv4 in an ISATAP address
@throws IllegalArgumentException if the argument is not a valid IPv6 ISATAP address
|
java
|
android/guava/src/com/google/common/net/InetAddresses.java
| 884
|
[
"ip"
] |
Inet4Address
| true
| 1
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
addContextValue
|
ExceptionContext addContextValue(String label, Object value);
|
Adds a contextual label-value pair into this context.
<p>
The pair will be added to the context, independently of an already
existing pair with the same label.
</p>
@param label the label of the item to add, {@code null} not recommended
@param value the value of item to add, may be {@code null}
@return {@code this}, for method chaining, not {@code null}
|
java
|
src/main/java/org/apache/commons/lang3/exception/ExceptionContext.java
| 49
|
[
"label",
"value"
] |
ExceptionContext
| true
| 1
| 6.64
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
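The `ExceptionContext.addContextValue` row above documents an interface method. A minimal usage sketch, assuming Commons Lang 3 on the classpath and using `ContextedRuntimeException` (an implementation that exposes `addContextValue`); the labels and values below are made up for illustration:

```java
import org.apache.commons.lang3.exception.ContextedRuntimeException;

public class ContextValueExample {
    public static void main(String[] args) {
        try {
            // The label/value pairs here are illustrative only.
            throw new ContextedRuntimeException("Transfer failed")
                    .addContextValue("accountId", 12345)  // duplicate labels are allowed
                    .addContextValue("accountId", 12345)
                    .addContextValue("amount", null);     // null values are permitted
        } catch (ContextedRuntimeException ex) {
            // getMessage() appends the recorded label/value pairs to the base message.
            System.out.println(ex.getMessage());
        }
    }
}
```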
getInvariantTypeResolver
|
private TypeResolver getInvariantTypeResolver() {
TypeResolver resolver = invariantTypeResolver;
if (resolver == null) {
resolver = (invariantTypeResolver = TypeResolver.invariantly(runtimeType));
}
return resolver;
}
|
Returns the type token representing the generic type declaration of {@code cls}. For example:
{@code TypeToken.getGenericType(Iterable.class)} returns {@code Iterable<T>}.
<p>If {@code cls} isn't parameterized and isn't a generic array, the type token of the class is
returned.
|
java
|
android/guava/src/com/google/common/reflect/TypeToken.java
| 1,202
|
[] |
TypeResolver
| true
| 2
| 6.24
|
google/guava
| 51,352
|
javadoc
| false
|
deserialize
|
@SuppressWarnings("resource") // inputStream is managed by the caller
public static <T> T deserialize(final InputStream inputStream) {
Objects.requireNonNull(inputStream, "inputStream");
try (ObjectInputStream in = new ObjectInputStream(inputStream)) {
@SuppressWarnings("unchecked")
final T obj = (T) in.readObject();
return obj;
} catch (final ClassNotFoundException | IOException | NegativeArraySizeException ex) {
throw new SerializationException(ex);
}
}
|
Deserializes an {@link Object} from the specified stream.
<p>
The stream will be closed once the object is written. This avoids the need for a finally clause, and maybe also
exception handling, in the application code.
</p>
<p>
The stream passed in is not buffered internally within this method. This is the responsibility of your
application if desired.
</p>
<p>
If the call site incorrectly types the return value, a {@link ClassCastException} is thrown from the call site.
Without Generics in this declaration, the call site must type cast and can cause the same ClassCastException.
Note that in both cases, the ClassCastException is in the call site, not in this method.
</p>
<p>
If you want to secure deserialization with a whitelist or blacklist, please use Apache Commons IO's
{@link org.apache.commons.io.serialization.ValidatingObjectInputStream ValidatingObjectInputStream}.
</p>
@param <T> the object type to be deserialized.
@param inputStream the serialized object input stream, must not be null.
@return the deserialized object.
@throws NullPointerException if {@code inputStream} is {@code null}.
@throws SerializationException (runtime) if the serialization fails.
@see org.apache.commons.io.serialization.ValidatingObjectInputStream
|
java
|
src/main/java/org/apache/commons/lang3/SerializationUtils.java
| 199
|
[
"inputStream"
] |
T
| true
| 2
| 7.44
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
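A hedged round-trip sketch for the `SerializationUtils.deserialize(InputStream)` row above; it assumes the companion `SerializationUtils.serialize(Serializable)` helper from the same class to produce the bytes:

```java
import java.io.ByteArrayInputStream;
import java.util.ArrayList;

import org.apache.commons.lang3.SerializationUtils;

public class DeserializeExample {
    public static void main(String[] args) {
        ArrayList<String> original = new ArrayList<>();
        original.add("alpha");
        original.add("beta");

        // serialize(Serializable) writes the object graph to a byte[].
        byte[] bytes = SerializationUtils.serialize(original);

        // The generic return type lets the call site pick the target type;
        // a wrong choice surfaces as a ClassCastException at this call site.
        ArrayList<String> copy = SerializationUtils.deserialize(new ByteArrayInputStream(bytes));
        System.out.println(copy);
    }
}
```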
toArray
|
@Nullable Object[] toArray();
|
Return the arguments as an object array.
@return the arguments as an object array
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/AutowiredArguments.java
| 77
|
[] | true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
escape
|
@Override
public String escape(String string) {
checkNotNull(string); // GWT specific check (do not optimize)
// Inlineable fast-path loop which hands off to escapeSlow() only if needed
int length = string.length();
for (int index = 0; index < length; index++) {
if (escape(string.charAt(index)) != null) {
return escapeSlow(string, index);
}
}
return string;
}
|
Returns the escaped form of a given literal string.
@param string the literal string to be escaped
@return the escaped form of {@code string}
@throws NullPointerException if {@code string} is null
|
java
|
android/guava/src/com/google/common/escape/CharEscaper.java
| 55
|
[
"string"
] |
String
| true
| 3
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
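A sketch of how the fast path in `CharEscaper.escape(String)` above is typically exercised, assuming the per-character hook is `protected char[] escape(char c)` and that returning `null` means "no replacement" (which is what the fast-path loop relies on); the ampersand rule is an invented example:

```java
import com.google.common.escape.CharEscaper;

public class AmpersandEscaper extends CharEscaper {
    // Returning null signals "no replacement", which lets escape(String)
    // return the original string without copying when nothing needs escaping.
    @Override
    protected char[] escape(char c) {
        return c == '&' ? "&amp;".toCharArray() : null;
    }

    public static void main(String[] args) {
        AmpersandEscaper escaper = new AmpersandEscaper();
        System.out.println(escaper.escape("fish & chips")); // fish &amp; chips
        System.out.println(escaper.escape("no-op input"));  // returned as-is
    }
}
```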
buildDefaultBeanName
|
protected String buildDefaultBeanName(BeanDefinition definition, BeanDefinitionRegistry registry) {
return buildDefaultBeanName(definition);
}
|
Derive a default bean name from the given bean definition.
<p>The default implementation delegates to {@link #buildDefaultBeanName(BeanDefinition)}.
@param definition the bean definition to build a bean name for
@param registry the registry that the given bean definition is being registered with
@return the default bean name (never {@code null})
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/AnnotationBeanNameGenerator.java
| 233
|
[
"definition",
"registry"
] |
String
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
readReplacements
|
@SuppressWarnings({ "unchecked", "rawtypes" })
private static Map<String, String> readReplacements(URL url) {
try (BufferedReader reader = new BufferedReader(
new InputStreamReader(new UrlResource(url).getInputStream(), StandardCharsets.UTF_8))) {
Properties properties = new Properties();
properties.load(reader);
return (Map) properties;
}
catch (IOException ex) {
throw new IllegalArgumentException("Unable to load replacements from location [" + url + "]", ex);
}
}
|
Loads the relocations from the classpath. Relocations are stored in files named
{@code META-INF/spring/full-qualified-annotation-name.replacements} on the
classpath. The file is loaded using {@link Properties#load(java.io.InputStream)}
with each entry containing an auto-configuration class name as the key and the
replacement class name as the value.
@param annotation annotation to load
@param classLoader class loader to use for loading
@return list of names of annotated classes
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/AutoConfigurationReplacements.java
| 122
|
[
"url"
] | true
| 2
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
haversine_distances
|
def haversine_distances(X, Y=None):
"""Compute the Haversine distance between samples in X and Y.
The Haversine (or great circle) distance is the angular distance between
two points on the surface of a sphere. The first coordinate of each point
is assumed to be the latitude, the second is the longitude, given
in radians. The dimension of the data must be 2.
.. math::
D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x_{lat} - y_{lat}) / 2)
+ \\cos(x_{lat})\\cos(y_{lat})\\
sin^2((x_{lon} - y_{lon}) / 2)}]
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, 2)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, 2), default=None
An optional second feature array. If `None`, uses `Y=X`.
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
The distance matrix.
Notes
-----
As the Earth is nearly spherical, the haversine formula provides a good
approximation of the distance between two points of the Earth surface, with
a less than 1% error on average.
Examples
--------
We want to calculate the distance between the Ezeiza Airport
(Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,
France).
>>> from sklearn.metrics.pairwise import haversine_distances
>>> from math import radians
>>> bsas = [-34.83333, -58.5166646]
>>> paris = [49.0083899664, 2.53844117956]
>>> bsas_in_radians = [radians(_) for _ in bsas]
>>> paris_in_radians = [radians(_) for _ in paris]
>>> result = haversine_distances([bsas_in_radians, paris_in_radians])
>>> result * 6371000/1000 # multiply by Earth radius to get kilometers
array([[ 0. , 11099.54035582],
[11099.54035582, 0. ]])
"""
from sklearn.metrics import DistanceMetric
return DistanceMetric.get_metric("haversine").pairwise(X, Y)
|
Compute the Haversine distance between samples in X and Y.
The Haversine (or great circle) distance is the angular distance between
two points on the surface of a sphere. The first coordinate of each point
is assumed to be the latitude, the second is the longitude, given
in radians. The dimension of the data must be 2.
.. math::
D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x_{lat} - y_{lat}) / 2)
+ \\cos(x_{lat})\\cos(y_{lat})\\
sin^2((x_{lon} - y_{lon}) / 2)}]
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples_X, 2)
A feature array.
Y : {array-like, sparse matrix} of shape (n_samples_Y, 2), default=None
An optional second feature array. If `None`, uses `Y=X`.
Returns
-------
distances : ndarray of shape (n_samples_X, n_samples_Y)
The distance matrix.
Notes
-----
As the Earth is nearly spherical, the haversine formula provides a good
approximation of the distance between two points of the Earth surface, with
a less than 1% error on average.
Examples
--------
We want to calculate the distance between the Ezeiza Airport
(Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris,
France).
>>> from sklearn.metrics.pairwise import haversine_distances
>>> from math import radians
>>> bsas = [-34.83333, -58.5166646]
>>> paris = [49.0083899664, 2.53844117956]
>>> bsas_in_radians = [radians(_) for _ in bsas]
>>> paris_in_radians = [radians(_) for _ in paris]
>>> result = haversine_distances([bsas_in_radians, paris_in_radians])
>>> result * 6371000/1000 # multiply by Earth radius to get kilometers
array([[ 0. , 11099.54035582],
[11099.54035582, 0. ]])
|
python
|
sklearn/metrics/pairwise.py
| 991
|
[
"X",
"Y"
] | false
| 1
| 6.32
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
isAfterCodeBlockContext
|
function isAfterCodeBlockContext(context: FormattingContext): boolean {
switch (context.currentTokenParent.kind) {
case SyntaxKind.ClassDeclaration:
case SyntaxKind.ModuleDeclaration:
case SyntaxKind.EnumDeclaration:
case SyntaxKind.CatchClause:
case SyntaxKind.ModuleBlock:
case SyntaxKind.SwitchStatement:
return true;
case SyntaxKind.Block: {
const blockParent = context.currentTokenParent.parent;
// In a codefix scenario, we can't rely on parents being set. So just always return true.
if (!blockParent || blockParent.kind !== SyntaxKind.ArrowFunction && blockParent.kind !== SyntaxKind.FunctionExpression) {
return true;
}
}
}
return false;
}
|
A rule takes a two tokens (left/right) and a particular context
for which you're meant to look at them. You then declare what should the
whitespace annotation be between these tokens via the action param.
@param debugName Name to print
@param left The left side of the comparison
@param right The right side of the comparison
@param context A set of filters to narrow down the space in which this formatter rule applies
@param action a declaration of the expected whitespace
@param flags whether the rule deletes a line or not, defaults to no-op
|
typescript
|
src/services/formatting/rules.ts
| 685
|
[
"context"
] | true
| 4
| 6.4
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
incrementAndGet
|
public float incrementAndGet() {
value++;
return value;
}
|
Increments this instance's value by 1; this method returns the value associated with the instance
immediately after the increment operation. This method is not thread safe.
@return the value associated with the instance after it is incremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableFloat.java
| 312
|
[] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
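A minimal sketch for the `MutableFloat.incrementAndGet()` row above, assuming Commons Lang 3:

```java
import org.apache.commons.lang3.mutable.MutableFloat;

public class MutableFloatExample {
    public static void main(String[] args) {
        MutableFloat counter = new MutableFloat(1.5f);

        // Returns the value *after* the increment, unlike getAndIncrement().
        float afterIncrement = counter.incrementAndGet();

        System.out.println(afterIncrement);     // 2.5
        System.out.println(counter.getValue()); // 2.5
    }
}
```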
__init__
|
def __init__(self, *args, **kwargs):
"""
Initialize on the given sequence: may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1:
coords = args[0]
else:
coords = args
if not (
isinstance(coords, (tuple, list))
or numpy
and isinstance(coords, numpy.ndarray)
):
raise TypeError("Invalid initialization input for LineStrings.")
# If SRID was passed in with the keyword arguments
srid = kwargs.get("srid")
ncoords = len(coords)
if not ncoords:
super().__init__(self._init_func(None), srid=srid)
return
if ncoords < self._minlength:
raise ValueError(
"%s requires at least %d points, got %s."
% (
self.__class__.__name__,
self._minlength,
ncoords,
)
)
numpy_coords = not isinstance(coords, (tuple, list))
if numpy_coords:
shape = coords.shape # Using numpy's shape.
if len(shape) != 2:
raise TypeError("Too many dimensions.")
self._checkdim(shape[1])
ndim = shape[1]
else:
# Getting the number of coords and the number of dimensions, which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ndim = None
# Incrementing through each of the coordinates and verifying
for coord in coords:
if not isinstance(coord, (tuple, list, Point)):
raise TypeError(
"Each coordinate should be a sequence (list or tuple)"
)
if ndim is None:
ndim = len(coord)
self._checkdim(ndim)
elif len(coord) != ndim:
raise TypeError("Dimension mismatch.")
# Creating a coordinate sequence object because it is easier to
# set the points using its methods.
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3))
point_setter = cs._set_point_3d if ndim == 3 else cs._set_point_2d
for i in range(ncoords):
if numpy_coords:
point_coords = coords[i, :]
elif isinstance(coords[i], Point):
point_coords = coords[i].tuple
else:
point_coords = coords[i]
point_setter(i, point_coords)
# Calling the base geometry initialization with the returned pointer
# from the function.
super().__init__(self._init_func(cs.ptr), srid=srid)
|
Initialize on the given sequence: may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
|
python
|
django/contrib/gis/geos/linestring.py
| 14
|
[
"self"
] | false
| 20
| 6.4
|
django/django
| 86,204
|
unknown
| false
|
|
getDeclarationErrorAddendum
|
function getDeclarationErrorAddendum() {
const ownerName = getCurrentFiberOwnerNameInDevOrNull();
if (ownerName) {
return '\n\nCheck the render method of `' + ownerName + '`.';
}
return '';
}
|
Copyright (c) Meta Platforms, Inc. and affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@flow
|
javascript
|
packages/react-dom-bindings/src/client/ReactDOMSelect.js
| 23
|
[] | false
| 2
| 6.24
|
facebook/react
| 241,750
|
jsdoc
| false
|
|
splitChildren
|
function splitChildren(children: readonly Node[], pivotOn: (child: Node) => boolean, separateTrailingSemicolon = true): readonly Node[] {
if (children.length < 2) {
return children;
}
const splitTokenIndex = findIndex(children, pivotOn);
if (splitTokenIndex === -1) {
return children;
}
const leftChildren = children.slice(0, splitTokenIndex);
const splitToken = children[splitTokenIndex];
const lastToken = last(children);
const separateLastToken = separateTrailingSemicolon && lastToken.kind === SyntaxKind.SemicolonToken;
const rightChildren = children.slice(splitTokenIndex + 1, separateLastToken ? children.length - 1 : undefined);
const result = compact([
leftChildren.length ? createSyntaxList(leftChildren) : undefined,
splitToken,
rightChildren.length ? createSyntaxList(rightChildren) : undefined,
]);
return separateLastToken ? result.concat(lastToken) : result;
}
|
Splits sibling nodes into up to four partitions:
1) everything left of the first node matched by `pivotOn`,
2) the first node matched by `pivotOn`,
3) everything right of the first node matched by `pivotOn`,
4) a trailing semicolon, if `separateTrailingSemicolon` is enabled.
The left and right groups, if not empty, will each be grouped into their own containing SyntaxList.
@param children The sibling nodes to split.
@param pivotOn The predicate function to match the node to be the pivot. The first node that matches
the predicate will be used; any others that may match will be included into the right-hand group.
@param separateTrailingSemicolon If the last token is a semicolon, it will be returned as a separate
child rather than be included in the right-hand group.
|
typescript
|
src/services/smartSelection.ts
| 318
|
[
"children",
"pivotOn",
"separateTrailingSemicolon"
] | true
| 8
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
clearCacheIncludingAncestors
|
public void clearCacheIncludingAncestors() {
clearCache();
if (getParentMessageSource() instanceof ReloadableResourceBundleMessageSource reloadableMsgSrc) {
reloadableMsgSrc.clearCacheIncludingAncestors();
}
}
|
Clear the resource bundle caches of this MessageSource and all its ancestors.
@see #clearCache
|
java
|
spring-context/src/main/java/org/springframework/context/support/ReloadableResourceBundleMessageSource.java
| 622
|
[] |
void
| true
| 2
| 6.72
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
put
|
public JSONObject put(String name, long value) throws JSONException {
this.nameValuePairs.put(checkName(name), value);
return this;
}
|
Maps {@code name} to {@code value}, clobbering any existing name/value mapping with
the same name.
@param name the name of the property
@param value the value of the property
@return this object.
@throws JSONException if an error occurs
|
java
|
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONObject.java
| 245
|
[
"name",
"value"
] |
JSONObject
| true
| 1
| 6.96
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
containsAny
|
public static boolean containsAny(final int[] array, final int... objectsToFind) {
return IntStreams.of(objectsToFind).anyMatch(e -> contains(array, e));
}
|
Checks if any of the ints are in the given array.
<p>
The method returns {@code false} if a {@code null} array is passed in.
</p>
<p>
If the {@code array} elements you are searching implement {@link Comparator}, consider whether it is worth using
{@link Arrays#sort(int[])} and {@link Arrays#binarySearch(int[], int)}.
</p>
@param array the array to search.
@param objectsToFind any of the ints to find.
@return {@code true} if the array contains any of the ints.
@since 3.18.0
|
java
|
src/main/java/org/apache/commons/lang3/ArrayUtils.java
| 1,761
|
[
"array"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
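A usage sketch for the `ArrayUtils.containsAny(int[], int...)` row above; per its `@since 3.18.0` tag this assumes a recent Commons Lang 3 release:

```java
import org.apache.commons.lang3.ArrayUtils;

public class ContainsAnyExample {
    public static void main(String[] args) {
        int[] haystack = {2, 4, 8, 16};

        // true: 8 is present even though 3 is not
        System.out.println(ArrayUtils.containsAny(haystack, 3, 8));

        // false: none of the candidates occur in the array
        System.out.println(ArrayUtils.containsAny(haystack, 1, 3, 5));
    }
}
```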
toIntValue
|
public static int toIntValue(final char ch, final int defaultValue) {
return isAsciiNumeric(ch) ? ch - 48 : defaultValue;
}
|
Converts the character to the Integer it represents, throwing an
exception if the character is not numeric.
<p>This method converts the char '1' to the int 1 and so on.</p>
<pre>
CharUtils.toIntValue('3', -1) = 3
CharUtils.toIntValue('A', -1) = -1
</pre>
@param ch the character to convert
@param defaultValue the default value to use if the character is not numeric
@return the int value of the character
|
java
|
src/main/java/org/apache/commons/lang3/CharUtils.java
| 410
|
[
"ch",
"defaultValue"
] | true
| 2
| 7.84
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
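A sketch matching the `CharUtils.toIntValue(char, int)` row above; as the ternary in the method body shows, this two-argument overload falls back to the supplied default for non-numeric input rather than throwing:

```java
import org.apache.commons.lang3.CharUtils;

public class ToIntValueExample {
    public static void main(String[] args) {
        // '3' is ASCII numeric, so its digit value is returned.
        System.out.println(CharUtils.toIntValue('3', -1)); // 3

        // 'A' is not numeric, so this overload returns the default value.
        System.out.println(CharUtils.toIntValue('A', -1)); // -1
    }
}
```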
_check_warnings
|
def _check_warnings(self, *, params):
"""Check whether metadata is passed which is marked as WARN.
If any metadata is passed which is marked as WARN, a warning is raised.
Parameters
----------
params : dict
The metadata passed to a method.
"""
params = {} if params is None else params
warn_params = {
prop
for prop, alias in self._requests.items()
if alias == WARN and prop in params
}
for param in warn_params:
warn(
f"Support for {param} has recently been added to {self.owner} class. "
"To maintain backward compatibility, it is ignored now. "
f"Using `set_{self.method}_request({param}={{True, False}})` "
"on this method of the class, you can set the request value "
"to False to silence this warning, or to True to consume and "
"use the metadata."
)
|
Check whether metadata is passed which is marked as WARN.
If any metadata is passed which is marked as WARN, a warning is raised.
Parameters
----------
params : dict
The metadata passed to a method.
|
python
|
sklearn/utils/_metadata_requests.py
| 430
|
[
"self",
"params"
] | false
| 4
| 6.08
|
scikit-learn/scikit-learn
| 64,340
|
numpy
| false
|
|
decrementAndGet
|
public double decrementAndGet() {
value--;
return value;
}
|
Decrements this instance's value by 1; this method returns the value associated with the instance
immediately after the decrement operation. This method is not thread safe.
@return the value associated with the instance after it is decremented.
@since 3.5
|
java
|
src/main/java/org/apache/commons/lang3/mutable/MutableDouble.java
| 159
|
[] | true
| 1
| 6.96
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
expireAfterAccess
|
@J2ObjCIncompatible
@GwtIncompatible // Duration
@SuppressWarnings("GoodTime") // Duration decomposition
@IgnoreJRERequirement // No more dangerous than wherever the caller got the Duration from
@CanIgnoreReturnValue
public CacheBuilder<K, V> expireAfterAccess(Duration duration) {
return expireAfterAccess(toNanosSaturated(duration), NANOSECONDS);
}
|
Specifies that each entry should be automatically removed from the cache once a fixed duration
has elapsed after the entry's creation, the most recent replacement of its value, or its last
access. Access time is reset by all cache read and write operations (including {@code
Cache.asMap().get(Object)} and {@code Cache.asMap().put(K, V)}), but not by {@code
containsKey(Object)}, nor by operations on the collection-views of {@link Cache#asMap}}. So,
for example, iterating through {@code Cache.asMap().entrySet()} does not reset access time for
the entries you retrieve.
<p>When {@code duration} is zero, this method hands off to {@link #maximumSize(long)
maximumSize}{@code (0)}, ignoring any otherwise-specified maximum size or weight. This can be
useful in testing, or to disable caching temporarily without a code change.
<p>Expired entries may be counted in {@link Cache#size}, but will never be visible to read or
write operations. Expired entries are cleaned up as part of the routine maintenance described
in the class javadoc.
@param duration the length of time after an entry is last accessed that it should be
automatically removed
@return this {@code CacheBuilder} instance (for chaining)
@throws IllegalArgumentException if {@code duration} is negative
@throws IllegalStateException if {@link #expireAfterAccess} was already set
@throws ArithmeticException for durations greater than +/- approximately 292 years
@since 33.3.0 (but since 25.0 in the JRE <a
href="https://github.com/google/guava#guava-google-core-libraries-for-java">flavor</a>)
|
java
|
android/guava/src/com/google/common/cache/CacheBuilder.java
| 799
|
[
"duration"
] | true
| 1
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
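A sketch of the `CacheBuilder.expireAfterAccess(Duration)` row above, assuming a Guava version that provides the `Duration` overload; the size and duration are arbitrary:

```java
import java.time.Duration;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class ExpireAfterAccessExample {
    public static void main(String[] args) {
        // Entries are dropped once they have not been read or written for 10 minutes.
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .maximumSize(1_000)
                .expireAfterAccess(Duration.ofMinutes(10))
                .build();

        cache.put("greeting", "hello");
        // getIfPresent is a cache read, so it resets the access clock for this
        // entry; iterating cache.asMap().entrySet() would not.
        System.out.println(cache.getIfPresent("greeting"));
    }
}
```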
suppress
|
protected void suppress(Consumer<AnnotationSpec.Builder> annotationSpec) {
if (!this.warnings.isEmpty()) {
Builder annotation = AnnotationSpec.builder(SuppressWarnings.class)
.addMember("value", generateValueCode());
annotationSpec.accept(annotation);
}
}
|
Consume the builder for {@link SuppressWarnings} if necessary. If this
instance has no warnings registered, the consumer is not invoked.
@param annotationSpec a consumer of the {@link AnnotationSpec.Builder}
@see MethodSpec.Builder#addAnnotation(AnnotationSpec)
@see TypeSpec.Builder#addAnnotation(AnnotationSpec)
@see FieldSpec.Builder#addAnnotation(AnnotationSpec)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/aot/CodeWarnings.java
| 126
|
[
"annotationSpec"
] |
void
| true
| 2
| 6.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
writeParentDirectoryEntries
|
private void writeParentDirectoryEntries(String name) throws IOException {
String parent = name.endsWith("/") ? name.substring(0, name.length() - 1) : name;
while (parent.lastIndexOf('/') != -1) {
parent = parent.substring(0, parent.lastIndexOf('/'));
if (!parent.isEmpty()) {
writeEntry(new JarArchiveEntry(parent + "/"), null, null);
}
}
}
|
Perform the actual write of a {@link JarEntry}. All other write methods delegate to
this one.
@param entry the entry to write
@param library the library for the entry or {@code null}
@param entryWriter the entry writer or {@code null} if there is no content
@throws IOException in case of I/O errors
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/AbstractJarWriter.java
| 278
|
[
"name"
] |
void
| true
| 4
| 6.72
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
shortArrayToLong
|
public static long shortArrayToLong(final short[] src, final int srcPos, final long dstInit, final int dstPos, final int nShorts) {
if (src.length == 0 && srcPos == 0 || 0 == nShorts) {
return dstInit;
}
if ((nShorts - 1) * Short.SIZE + dstPos >= Long.SIZE) {
throw new IllegalArgumentException("(nShorts - 1) * 16 + dstPos >= 64");
}
long out = dstInit;
for (int i = 0; i < nShorts; i++) {
final int shift = i * Short.SIZE + dstPos;
final long bits = (0xffffL & src[i + srcPos]) << shift;
final long mask = 0xffffL << shift;
out = out & ~mask | bits;
}
return out;
}
|
Converts an array of short into a long using the default (little-endian, LSB0) byte and bit ordering.
@param src the short array to convert.
@param srcPos the position in {@code src}, in short unit, from where to start the conversion.
@param dstInit initial value of the destination long.
@param dstPos the position of the LSB, in bits, in the result long.
@param nShorts the number of shorts to convert.
@return a long containing the selected bits.
@throws NullPointerException if {@code src} is {@code null}.
@throws IllegalArgumentException if {@code (nShorts - 1) * 16 + dstPos >= 64}.
@throws ArrayIndexOutOfBoundsException if {@code srcPos + nShorts > src.length}.
|
java
|
src/main/java/org/apache/commons/lang3/Conversion.java
| 1,256
|
[
"src",
"srcPos",
"dstInit",
"dstPos",
"nShorts"
] | true
| 6
| 8.08
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
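A worked sketch for the `Conversion.shortArrayToLong` row above; the expected value is traced by hand from the shift/mask loop shown (LSB0 bit order, little-endian short order), assuming Commons Lang 3:

```java
import org.apache.commons.lang3.Conversion;

public class ShortArrayToLongExample {
    public static void main(String[] args) {
        short[] src = {0x1234, 0x5678};

        // src[0] fills bits 0-15, src[1] fills bits 16-31, and the remaining
        // bits keep dstInit (0 here).
        long packed = Conversion.shortArrayToLong(src, 0, 0L, 0, 2);

        System.out.printf("0x%X%n", packed); // 0x56781234
    }
}
```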
instantiate
|
public @Nullable T instantiate(String name) {
return instantiate(null, name);
}
|
Instantiate the given set of class name, injecting constructor arguments as
necessary.
@param name the class name to instantiate
@return an instantiated instance
@since 3.4.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/util/Instantiator.java
| 137
|
[
"name"
] |
T
| true
| 1
| 6.48
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
snapshotElementsToList
|
private List<E> snapshotElementsToList() {
List<E> list = newArrayListWithExpectedSize(size());
for (Multiset.Entry<E> entry : entrySet()) {
E element = entry.getElement();
for (int i = entry.getCount(); i > 0; i--) {
list.add(element);
}
}
return list;
}
|
{@inheritDoc}
<p>If the data in the multiset is modified by any other threads during this method, it is
undefined which (if any) of these modifications will be reflected in the result.
|
java
|
android/guava/src/com/google/common/collect/ConcurrentHashMultiset.java
| 203
|
[] | true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
start_cdxgen_servers
|
def start_cdxgen_servers(application_root_path: Path, run_in_parallel: bool, parallelism: int) -> None:
"""
Start cdxgen servers that is used to perform cdxgen scans of applications in child process
:param run_in_parallel: run parallel servers
:param parallelism: parallelism to use
:param application_root_path: path where the application to scan is located
"""
run_command(
[
"docker",
"pull",
"ghcr.io/cyclonedx/cdxgen",
],
check=True,
)
if not run_in_parallel:
fork_cdxgen_server(application_root_path)
else:
for i in range(parallelism):
fork_cdxgen_server(application_root_path, port=8080 + i)
time.sleep(1)
get_console().print("[info]Waiting for cdxgen server(s) to start")
time.sleep(3)
if os.environ.get("CI", "false") == "true":
# In CI we wait longer for the server to start
get_console().print("[info]Waiting longer for cdxgen server(s) to start in CI")
time.sleep(5)
print("::endgroup::")
|
Start cdxgen servers that is used to perform cdxgen scans of applications in child process
:param run_in_parallel: run parallel servers
:param parallelism: parallelism to use
:param application_root_path: path where the application to scan is located
|
python
|
dev/breeze/src/airflow_breeze/utils/cdxgen.py
| 53
|
[
"application_root_path",
"run_in_parallel",
"parallelism"
] |
None
| true
| 5
| 6.08
|
apache/airflow
| 43,597
|
sphinx
| false
|
copy
|
def copy( # type: ignore[override]
self,
names=None,
deep: bool = False,
name=None,
) -> Self:
"""
Make a copy of this object. Names, dtype, levels and codes can be passed and \
will be set on new copy.
The `copy` method provides a mechanism to create a duplicate of an
existing MultiIndex object. This is particularly useful in scenarios where
modifications are required on an index, but the original MultiIndex should
remain unchanged. By specifying the `deep` parameter, users can control
whether the copy should be a deep or shallow copy, providing flexibility
depending on the size and complexity of the MultiIndex.
Parameters
----------
names : sequence, optional
Names to set on the new MultiIndex object.
deep : bool, default False
If False, the new object will be a shallow copy. If True, a deep copy
will be attempted. Deep copying can be potentially expensive for large
MultiIndex objects.
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
A new MultiIndex object with the specified modifications.
See Also
--------
MultiIndex.from_arrays : Convert arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Convert DataFrame to MultiIndex.
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a"], ["b"], ["c"]])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.copy()
MultiIndex([('a', 'b', 'c')],
)
"""
names = self._validate_names(name=name, names=names, deep=deep)
keep_id = not deep
levels, codes = None, None
if deep:
from copy import deepcopy
levels = deepcopy(self.levels)
codes = deepcopy(self.codes)
levels = levels if levels is not None else self.levels
codes = codes if codes is not None else self.codes
new_index = type(self)(
levels=levels,
codes=codes,
sortorder=self.sortorder,
names=names,
verify_integrity=False,
)
new_index._cache = self._cache.copy()
new_index._reset_cache("levels") # GH32669
if keep_id:
new_index._id = self._id
return new_index
|
Make a copy of this object. Names, dtype, levels and codes can be passed and \
will be set on new copy.
The `copy` method provides a mechanism to create a duplicate of an
existing MultiIndex object. This is particularly useful in scenarios where
modifications are required on an index, but the original MultiIndex should
remain unchanged. By specifying the `deep` parameter, users can control
whether the copy should be a deep or shallow copy, providing flexibility
depending on the size and complexity of the MultiIndex.
Parameters
----------
names : sequence, optional
Names to set on the new MultiIndex object.
deep : bool, default False
If False, the new object will be a shallow copy. If True, a deep copy
will be attempted. Deep copying can be potentially expensive for large
MultiIndex objects.
name : Label
Kept for compatibility with 1-dimensional Index. Should not be used.
Returns
-------
MultiIndex
A new MultiIndex object with the specified modifications.
See Also
--------
MultiIndex.from_arrays : Convert arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Convert DataFrame to MultiIndex.
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([["a"], ["b"], ["c"]])
>>> mi
MultiIndex([('a', 'b', 'c')],
)
>>> mi.copy()
MultiIndex([('a', 'b', 'c')],
)
|
python
|
pandas/core/indexes/multi.py
| 1,314
|
[
"self",
"names",
"deep",
"name"
] |
Self
| true
| 5
| 8.24
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
registerBeanDefinitions
|
public int registerBeanDefinitions(Map<?, ?> map, @Nullable String prefix, String resourceDescription)
throws BeansException {
if (prefix == null) {
prefix = "";
}
int beanCount = 0;
for (Object key : map.keySet()) {
if (!(key instanceof String keyString)) {
throw new IllegalArgumentException("Illegal key [" + key + "]: only Strings allowed");
}
if (keyString.startsWith(prefix)) {
// Key is of form: prefix<name>.property
String nameAndProperty = keyString.substring(prefix.length());
// Find dot before property name, ignoring dots in property keys.
int sepIdx;
int propKeyIdx = nameAndProperty.indexOf(PropertyAccessor.PROPERTY_KEY_PREFIX);
if (propKeyIdx != -1) {
sepIdx = nameAndProperty.lastIndexOf(SEPARATOR, propKeyIdx);
}
else {
sepIdx = nameAndProperty.lastIndexOf(SEPARATOR);
}
if (sepIdx != -1) {
String beanName = nameAndProperty.substring(0, sepIdx);
if (logger.isTraceEnabled()) {
logger.trace("Found bean name '" + beanName + "'");
}
if (!getRegistry().containsBeanDefinition(beanName)) {
// If we haven't already registered it...
registerBeanDefinition(beanName, map, prefix + beanName, resourceDescription);
++beanCount;
}
}
else {
// Ignore it: It wasn't a valid bean name and property,
// although it did start with the required prefix.
if (logger.isDebugEnabled()) {
logger.debug("Invalid bean name and property [" + nameAndProperty + "]");
}
}
}
}
return beanCount;
}
|
Register bean definitions contained in a Map.
Ignore ineligible properties.
@param map a map of {@code name} to {@code property} (String or Object). Property
values will be strings if coming from a Properties file etc. Property names
(keys) <b>must</b> be Strings. Class keys must be Strings.
@param prefix a filter within the keys in the map: for example, 'beans.'
(can be empty or {@code null})
@param resourceDescription description of the resource that the
Map came from (for logging purposes)
@return the number of bean definitions found
@throws BeansException in case of loading or parsing errors
@see #registerBeanDefinitions(Map, String)
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/PropertiesBeanDefinitionReader.java
| 356
|
[
"map",
"prefix",
"resourceDescription"
] | true
| 9
| 8.08
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
|
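A rough sketch of how the `PropertiesBeanDefinitionReader.registerBeanDefinitions(Map, String, String)` row above is typically driven. The reader is deprecated in recent Spring versions and `com.example.GreetingService` is a placeholder class name, so treat this as an illustration of the `prefix<name>.property` key format the method parses, not production code:

```java
import java.util.HashMap;
import java.util.Map;

import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.beans.factory.support.PropertiesBeanDefinitionReader;

public class PropertiesRegistrationExample {
    public static void main(String[] args) {
        DefaultListableBeanFactory registry = new DefaultListableBeanFactory();
        PropertiesBeanDefinitionReader reader = new PropertiesBeanDefinitionReader(registry);

        // Keys follow prefix + <beanName>.<property>; "(class)" names the bean class.
        // com.example.GreetingService is a placeholder class name.
        Map<String, String> map = new HashMap<>();
        map.put("beans.greeter.(class)", "com.example.GreetingService");
        map.put("beans.greeter.message", "hello");
        map.put("other.ignored", "not under the prefix");

        int registered = reader.registerBeanDefinitions(map, "beans.", "inline map");
        System.out.println(registered); // expected: 1 (only "greeter" matches the prefix)
    }
}
```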
isCycloneDxBom
|
private boolean isCycloneDxBom(JarEntry entry) {
if (!entry.getName().startsWith("META-INF/sbom/")) {
return false;
}
return entry.getName().endsWith(".cdx.json") || entry.getName().endsWith("/bom.json");
}
|
Return the {@link File} to use to back up the original source.
@return the file to use to back up the original source
|
java
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/Packager.java
| 424
|
[
"entry"
] | true
| 3
| 6.88
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
resolveSchemaEntity
|
protected @Nullable InputSource resolveSchemaEntity(@Nullable String publicId, String systemId) {
InputSource source;
// External dtd/xsd lookup via https even for canonical http declaration
String url = systemId;
if (url.startsWith("http:")) {
url = "https:" + url.substring(5);
}
if (logger.isWarnEnabled()) {
logger.warn("DTD/XSD XML entity [" + systemId + "] not found, falling back to remote https resolution");
}
try {
source = new InputSource(ResourceUtils.toURL(url).openStream());
source.setPublicId(publicId);
source.setSystemId(systemId);
}
catch (IOException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Could not resolve XML entity [" + systemId + "] through URL [" + url + "]", ex);
}
// Fall back to the parser's default behavior.
source = null;
}
return source;
}
|
A fallback method for {@link #resolveEntity(String, String)} that is used when a
"schema" entity (DTD or XSD) cannot be resolved as a local resource. The default
behavior is to perform remote resolution over HTTPS.
<p>Subclasses can override this method to change the default behavior.
<ul>
<li>Return {@code null} to fall back to the parser's
{@linkplain org.xml.sax.EntityResolver#resolveEntity(String, String) default behavior}.</li>
<li>Throw an exception to prevent remote resolution of the DTD or XSD.</li>
</ul>
@param publicId the public identifier of the external entity being referenced,
or null if none was supplied
@param systemId the system identifier of the external entity being referenced,
representing the URL of the DTD or XSD
@return an InputSource object describing the new input source, or null to request
that the parser open a regular URI connection to the system identifier
@since 6.0.4
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/ResourceEntityResolver.java
| 137
|
[
"publicId",
"systemId"
] |
InputSource
| true
| 5
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
group_lines_by_file
|
def group_lines_by_file(lines: list[str]) -> dict[str, list[str]]:
"""
Group matching lines by filename.
Args:
lines: List of grep output lines in format "filename:line:content"
Returns:
Dictionary mapping filename to list of line remainders (without filename prefix)
"""
grouped: dict[str, list[str]] = {}
for line in lines:
if not line:
continue
# Extract filename and remainder from "filename:line:content" format
parts = line.split(":", 1)
filename = parts[0]
remainder = parts[1] if len(parts) > 1 else ""
if filename not in grouped:
grouped[filename] = []
grouped[filename].append(remainder)
return grouped
|
Group matching lines by filename.
Args:
lines: List of grep output lines in format "filename:line:content"
Returns:
Dictionary mapping filename to list of line remainders (without filename prefix)
|
python
|
tools/linter/adapters/grep_linter.py
| 91
|
[
"lines"
] |
dict[str, list[str]]
| true
| 5
| 7.6
|
pytorch/pytorch
| 96,034
|
google
| false
|
doClose
|
protected void doClose() {
// Check whether an actual close attempt is necessary...
if (this.active.get() && this.closed.compareAndSet(false, true)) {
if (logger.isDebugEnabled()) {
logger.debug("Closing " + this);
}
try {
// Publish shutdown event.
publishEvent(new ContextClosedEvent(this));
}
catch (Throwable ex) {
logger.warn("Exception thrown from ApplicationListener handling ContextClosedEvent", ex);
}
// Stop all Lifecycle beans, to avoid delays during individual destruction.
if (this.lifecycleProcessor != null) {
try {
this.lifecycleProcessor.onClose();
}
catch (Throwable ex) {
logger.warn("Exception thrown from LifecycleProcessor on context close", ex);
}
}
// Destroy all cached singletons in the context's BeanFactory.
destroyBeans();
// Close the state of this context itself.
closeBeanFactory();
// Let subclasses do some final clean-up if they wish...
onClose();
// Reset common introspection caches to avoid class reference leaks.
resetCommonCaches();
// Reset local application listeners to pre-refresh state.
if (this.earlyApplicationListeners != null) {
this.applicationListeners.clear();
this.applicationListeners.addAll(this.earlyApplicationListeners);
}
// Reset internal delegates.
this.applicationEventMulticaster = null;
this.messageSource = null;
this.lifecycleProcessor = null;
// Switch to inactive.
this.active.set(false);
}
}
|
Actually performs context closing: publishes a ContextClosedEvent and
destroys the singletons in the bean factory of this application context.
<p>Called by both {@code close()} and a JVM shutdown hook, if any.
@see org.springframework.context.event.ContextClosedEvent
@see #destroyBeans()
@see #close()
@see #registerShutdownHook()
|
java
|
spring-context/src/main/java/org/springframework/context/support/AbstractApplicationContext.java
| 1,160
|
[] |
void
| true
| 8
| 6.4
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
maybeExecuteForceMerge
|
private Set<Index> maybeExecuteForceMerge(ProjectMetadata project, List<Index> indices) {
Set<Index> affectedIndices = new HashSet<>();
for (Index index : indices) {
IndexMetadata backingIndex = project.index(index);
assert backingIndex != null : "the data stream backing indices must exist";
String indexName = index.getName();
boolean alreadyForceMerged = isForceMergeComplete(backingIndex);
if (alreadyForceMerged) {
logger.trace("Already force merged {}", indexName);
continue;
}
ByteSizeValue configuredFloorSegmentMerge = MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.get(
backingIndex.getSettings()
);
Integer configuredMergeFactor = MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.get(backingIndex.getSettings());
if ((configuredFloorSegmentMerge == null || configuredFloorSegmentMerge.equals(targetMergePolicyFloorSegment) == false)
|| (configuredMergeFactor == null || configuredMergeFactor.equals(targetMergePolicyFactor) == false)) {
UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest();
updateMergePolicySettingsRequest.indices(indexName);
updateMergePolicySettingsRequest.settings(
Settings.builder()
.put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), targetMergePolicyFloorSegment)
.put(MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), targetMergePolicyFactor)
);
updateMergePolicySettingsRequest.masterNodeTimeout(TimeValue.MAX_VALUE);
affectedIndices.add(index);
transportActionsDeduplicator.executeOnce(
Tuple.tuple(project.id(), updateMergePolicySettingsRequest),
new ErrorRecordingActionListener(
TransportUpdateSettingsAction.TYPE.name(),
project.id(),
indexName,
errorStore,
Strings.format(
"Data stream lifecycle encountered an error trying to to update settings [%s] for index [%s]",
updateMergePolicySettingsRequest.settings().keySet(),
indexName
),
signallingErrorRetryInterval
),
(req, reqListener) -> updateIndexSetting(project.id(), updateMergePolicySettingsRequest, reqListener)
);
} else {
affectedIndices.add(index);
ForceMergeRequest forceMergeRequest = new ForceMergeRequest(indexName);
// time to force merge the index
transportActionsDeduplicator.executeOnce(
Tuple.tuple(project.id(), new ForceMergeRequestWrapper(forceMergeRequest)),
new ErrorRecordingActionListener(
ForceMergeAction.NAME,
project.id(),
indexName,
errorStore,
Strings.format(
"Data stream lifecycle encountered an error trying to force merge index [%s]. Data stream lifecycle will "
+ "attempt to force merge the index on its next run.",
indexName
),
signallingErrorRetryInterval
),
(req, reqListener) -> forceMergeIndex(project.id(), forceMergeRequest, reqListener)
);
}
}
return affectedIndices;
}
|
This method sends requests to delete any indices in the datastream that exceed its retention policy. It returns the set of indices
it has sent delete requests for.
@param project The project metadata from which to get index metadata
@param dataStream The data stream
@param indicesToExcludeForRemainingRun Indices to exclude from retention even if it would be time for them to be deleted
@return The set of indices that delete requests have been sent for
|
java
|
modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java
| 1,013
|
[
"project",
"indices"
] | true
| 6
| 7.84
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
|
load
|
public Class<?> load() {
try {
return loader.loadClass(className);
} catch (ClassNotFoundException e) {
// Shouldn't happen, since the class name is read from the class path.
throw new IllegalStateException(e);
}
}
|
Loads (but doesn't link or initialize) the class.
@throws LinkageError when there were errors in loading classes that this class depends on.
For example, {@link NoClassDefFoundError}.
|
java
|
android/guava/src/com/google/common/reflect/ClassPath.java
| 375
|
[] | true
| 2
| 6.56
|
google/guava
| 51,352
|
javadoc
| false
|
|
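A sketch for the `ClassPath.ClassInfo.load()` row above, assuming Guava's `ClassPath` scanner; the package scanned is just an example:

```java
import java.io.IOException;

import com.google.common.reflect.ClassPath;

public class ClassPathLoadExample {
    public static void main(String[] args) throws IOException {
        ClassPath classPath = ClassPath.from(ClassPathLoadExample.class.getClassLoader());

        // load() resolves each name read from the class path without linking
        // or initializing the class.
        for (ClassPath.ClassInfo info : classPath.getTopLevelClasses("com.google.common.reflect")) {
            Class<?> clazz = info.load();
            System.out.println(clazz.getName());
        }
    }
}
```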
findInCaches
|
private @Nullable Object findInCaches(CacheOperationContext context, Object key,
CacheOperationInvoker invoker, Method method, CacheOperationContexts contexts) {
for (Cache cache : context.getCaches()) {
if (CompletableFuture.class.isAssignableFrom(context.getMethod().getReturnType())) {
CompletableFuture<?> result = doRetrieve(cache, key);
if (result != null) {
return result.exceptionallyCompose(ex -> {
if (!(ex instanceof RuntimeException rex)) {
return CompletableFuture.failedFuture(ex);
}
try {
getErrorHandler().handleCacheGetError(rex, cache, key);
return CompletableFuture.completedFuture(null);
}
catch (Throwable ex2) {
return CompletableFuture.failedFuture(ex2);
}
}).thenCompose(value -> (CompletableFuture<?>) evaluate(
(value != null ? CompletableFuture.completedFuture(unwrapCacheValue(value)) : null),
invoker, method, contexts));
}
else {
continue;
}
}
if (this.reactiveCachingHandler != null) {
Object returnValue = this.reactiveCachingHandler.findInCaches(
context, cache, key, invoker, method, contexts);
if (returnValue != ReactiveCachingHandler.NOT_HANDLED) {
return returnValue;
}
}
Cache.ValueWrapper result = doGet(cache, key);
if (result != null) {
return result;
}
}
return null;
}
|
Find a cached value only for {@link CacheableOperation} that passes the condition.
@param contexts the cacheable operations
@return a {@link Cache.ValueWrapper} holding the cached value,
or {@code null} if none is found
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/CacheAspectSupport.java
| 530
|
[
"context",
"key",
"invoker",
"method",
"contexts"
] |
Object
| true
| 9
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
_handle_fab_downgrade
|
def _handle_fab_downgrade(*, session: Session) -> None:
"""
Handle FAB downgrade requirements for downgrades to Airflow versions < 3.0.0.
First, checks if the FAB db version matches the known version from 1.4.0.
If it matches, no FAB db tables need to be touched.
Otherwise, imports the FABDBManager and calls its downgrade method.
:param session: sqlalchemy session for connection to airflow metadata database
:raises RuntimeError: If FAB provider is required but cannot be imported
"""
fab_version = _get_fab_migration_version(session=session)
if fab_version == "6709f7a774b9": # 1.4.0
# FAB version matches - we can proceed without touching the FAB db tables
log.info(
"FAB migration version %s matches known version from 1.4.0. "
"FAB provider is not required for downgrade.",
fab_version,
)
return
connection = settings.get_engine().connect()
insp = inspect(connection)
if not fab_version and insp.has_table("ab_user"):
log.info(
"FAB migration version not found, but FAB tables exist. "
"FAB provider is not required for downgrade.",
)
return
# FAB db version is different or not found - require the FAB provider
try:
from airflow.providers.fab.auth_manager.models.db import FABDBManager
except ImportError:
raise RuntimeError(
"Import error occurred while importing FABDBManager. The apache-airflow-provider-fab package must be installed before we can "
"downgrade to <3.0.0."
)
dbm = FABDBManager(session)
if hasattr(dbm, "reset_to_2_x"):
dbm.reset_to_2_x()
else:
# Older version before we added that function, it only has a single migration so we can just create the tables
# to ensure they are there
dbm.create_db_from_orm()
|
Handle FAB downgrade requirements for downgrades to Airflow versions < 3.0.0.
First, checks if the FAB db version matches the known version from 1.4.0.
If it matches, no FAB db tables need to be touched.
Otherwise, imports the FABDBManager and calls its downgrade method.
:param session: sqlalchemy session for connection to airflow metadata database
:raises RuntimeError: If FAB provider is required but cannot be imported
|
python
|
airflow-core/src/airflow/utils/db.py
| 1,239
|
[
"session"
] |
None
| true
| 6
| 6.88
|
apache/airflow
| 43,597
|
sphinx
| false
|
padStart
|
public static String padStart(String string, int minLength, char padChar) {
checkNotNull(string); // eager for GWT.
if (string.length() >= minLength) {
return string;
}
StringBuilder sb = new StringBuilder(minLength);
for (int i = string.length(); i < minLength; i++) {
sb.append(padChar);
}
sb.append(string);
return sb.toString();
}
|
Returns a string, of length at least {@code minLength}, consisting of {@code string} prepended
with as many copies of {@code padChar} as are necessary to reach that length. For example,
<ul>
<li>{@code padStart("7", 3, '0')} returns {@code "007"}
<li>{@code padStart("2010", 3, '0')} returns {@code "2010"}
</ul>
<p>See {@link java.util.Formatter} for a richer set of formatting capabilities.
@param string the string which should appear at the end of the result
@param minLength the minimum length the resulting string must have. Can be zero or negative, in
which case the input string is always returned.
@param padChar the character to insert at the beginning of the result until the minimum length
is reached
@return the padded string
|
java
|
android/guava/src/com/google/common/base/Strings.java
| 90
|
[
"string",
"minLength",
"padChar"
] |
String
| true
| 3
| 8.08
|
google/guava
| 51,352
|
javadoc
| false
|
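A short usage sketch for the padStart entry above; the class name and printed values are illustrative, assuming Guava's com.google.common.base.Strings is on the classpath.

import com.google.common.base.Strings;

public class PadStartDemo {
    public static void main(String[] args) {
        // Pads on the left until the minimum length is reached.
        System.out.println(Strings.padStart("7", 3, '0'));    // "007"
        // Inputs already at or above minLength are returned unchanged.
        System.out.println(Strings.padStart("2010", 3, '0')); // "2010"
    }
}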
toSource
|
function toSource(func) {
if (func != null) {
try {
return funcToString.call(func);
} catch (e) {}
try {
return (func + '');
} catch (e) {}
}
return '';
}
|
Converts `func` to its source code.
@private
@param {Function} func The function to convert.
@returns {string} Returns the source code.
|
javascript
|
lodash.js
| 6,871
|
[
"func"
] | false
| 4
| 6.24
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
elementSet
|
Set<E> elementSet();
|
Returns the set of distinct elements contained in this multiset. The element set is backed by
the same data as the multiset, so any change to either is immediately reflected in the other.
The order of the elements in the element set is unspecified.
<p>If the element set supports any removal operations, these necessarily cause <b>all</b>
occurrences of the removed element(s) to be removed from the multiset. Implementations are not
expected to support the add operations, although this is possible.
<p>A common use for the element set is to find the number of distinct elements in the multiset:
{@code elementSet().size()}.
@return a view of the set of distinct elements in this multiset
|
java
|
android/guava/src/com/google/common/collect/Multiset.java
| 245
|
[] | true
| 1
| 6.48
|
google/guava
| 51,352
|
javadoc
| false
|
|
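A small sketch of the elementSet() contract described above, using Guava's HashMultiset; the sample values are illustrative.

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
import java.util.List;

public class ElementSetDemo {
    public static void main(String[] args) {
        Multiset<String> multiset = HashMultiset.create(List.of("a", "a", "b", "c", "c", "c"));
        // Distinct elements only; counts are ignored here.
        System.out.println(multiset.elementSet().size()); // 3 distinct elements
        // Removing from the element set removes ALL occurrences from the multiset.
        multiset.elementSet().remove("c");
        System.out.println(multiset.count("c"));          // 0
    }
}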
resolveWiringInfo
|
@Nullable BeanWiringInfo resolveWiringInfo(Object beanInstance);
|
Resolve the BeanWiringInfo for the given bean instance.
@param beanInstance the bean instance to resolve info for
@return the BeanWiringInfo, or {@code null} if not found
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/wiring/BeanWiringInfoResolver.java
| 44
|
[
"beanInstance"
] |
BeanWiringInfo
| true
| 1
| 6.48
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
execute
|
def execute(self, context: Context) -> str:
"""
Execute S3 to DynamoDB Job from Airflow.
:param context: The current context of the task instance
:return: The Amazon resource number (ARN)
"""
if self.use_existing_table:
self.log.info("Loading from S3 into new DynamoDB table %s", self.dynamodb_table_name)
return self._load_into_existing_table()
self.log.info("Loading from S3 into existing DynamoDB table %s", self.dynamodb_table_name)
return self._load_into_new_table(
table_name=self.dynamodb_table_name, delete_on_error=self.delete_on_error
)
|
Execute S3 to DynamoDB Job from Airflow.
:param context: The current context of the task instance
:return: The Amazon resource number (ARN)
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/transfers/s3_to_dynamodb.py
| 245
|
[
"self",
"context"
] |
str
| true
| 2
| 7.92
|
apache/airflow
| 43,597
|
sphinx
| false
|
parse
|
@Nullable BeanDefinition parse(Element element, ParserContext parserContext);
|
Parse the specified {@link Element} and register the resulting
{@link BeanDefinition BeanDefinition(s)} with the
{@link org.springframework.beans.factory.xml.ParserContext#getRegistry() BeanDefinitionRegistry}
embedded in the supplied {@link ParserContext}.
<p>Implementations must return the primary {@link BeanDefinition} that results
from the parse if they will ever be used in a nested fashion (for example as
an inner tag in a {@code <property/>} tag). Implementations may return
{@code null} if they will <strong>not</strong> be used in a nested fashion.
@param element the element that is to be parsed into one or more {@link BeanDefinition BeanDefinitions}
@param parserContext the object encapsulating the current state of the parsing process;
provides access to a {@link org.springframework.beans.factory.support.BeanDefinitionRegistry}
@return the primary {@link BeanDefinition}
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/xml/BeanDefinitionParser.java
| 55
|
[
"element",
"parserContext"
] |
BeanDefinition
| true
| 1
| 6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
getGlobalConstructor
|
function getGlobalConstructor(name: string, minLanguageVersion: ScriptTarget): SerializedTypeNode {
return languageVersion < minLanguageVersion ?
getGlobalConstructorWithFallback(name) :
factory.createIdentifier(name);
}
|
Serializes a qualified name as an expression for decorator type metadata.
@param node The qualified name to serialize.
|
typescript
|
src/compiler/transformers/typeSerializer.ts
| 628
|
[
"name",
"minLanguageVersion"
] | true
| 2
| 6.72
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
toLongStream
|
public LongStream toLongStream() {
return LongStream.rangeClosed(getMinimum(), getMaximum());
}
|
Returns a sequential ordered {@code LongStream} from {@link #getMinimum()} (inclusive) to {@link #getMaximum()} (inclusive) by an incremental step of
{@code 1}.
@return a sequential {@code LongStream} for the range of {@code long} elements
@since 3.18.0
|
java
|
src/main/java/org/apache/commons/lang3/LongRange.java
| 118
|
[] |
LongStream
| true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
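A minimal sketch of toLongStream(), assuming a commons-lang3 release that provides both LongRange.of (3.13+) and toLongStream (3.18+, per the javadoc above); the bounds are illustrative.

import org.apache.commons.lang3.LongRange;

public class LongRangeDemo {
    public static void main(String[] args) {
        LongRange range = LongRange.of(1L, 5L);
        // Sequential stream of 1, 2, 3, 4, 5 (both endpoints inclusive).
        long sum = range.toLongStream().sum();
        System.out.println(sum); // 15
    }
}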
asof_locs
|
def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> np.ndarray:
"""
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
"""
if isinstance(where, DatetimeIndex):
where = PeriodIndex(where._values, freq=self.freq)
elif not isinstance(where, PeriodIndex):
raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex")
return super().asof_locs(where, mask)
|
where : array of timestamps
mask : np.ndarray[bool]
Array of booleans where data is not NA.
|
python
|
pandas/core/indexes/period.py
| 406
|
[
"self",
"where",
"mask"
] |
np.ndarray
| true
| 3
| 6.72
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
poll
|
@CanIgnoreReturnValue
@Override
public @Nullable E poll() {
return isEmpty() ? null : removeAndGet(0);
}
|
Adds the given element to this queue. If this queue has a maximum size, after adding {@code
element} the queue will automatically evict its greatest element (according to its comparator),
which may be {@code element} itself.
|
java
|
android/guava/src/com/google/common/collect/MinMaxPriorityQueue.java
| 302
|
[] |
E
| true
| 2
| 6.88
|
google/guava
| 51,352
|
javadoc
| false
|
define_kernel
|
def define_kernel(self, src_code_str: str, node_schedule) -> str:
"""Produce the kernel string
Args:
src_code_str: The finalized kernel code string
node_schedule: List of nodes in the schedule
Note:
This is a little weird since async_compile.cutedsl() has to write the string to
a file in order to cute compile it. Feels bad to have two...
"""
wrapper = V.graph.wrapper_code
# Use the string as the key for caching
if src_code_str in wrapper.src_to_kernel:
kernel_name = wrapper.src_to_kernel[src_code_str]
else:
fused_name = (
get_fused_kernel_name(node_schedule, config.triton.descriptive_names)
if config.triton.descriptive_names
else ""
)
kernel_hash = hashlib.sha256(src_code_str.encode("utf-8")).hexdigest()[:8]
if fused_name == "fused":
kernel_name = f"cutedsl_{kernel_hash}"
else:
kernel_name = f"cutedsl_{fused_name}_{kernel_hash}"
wrapper.src_to_kernel[src_code_str] = kernel_name
src_code_str = src_code_str.replace(
str(Placeholder.KERNEL_NAME), kernel_name
)
_, _, kernel_path = get_path(code_hash(src_code_str), "py")
compile_wrapper = IndentedBuffer()
compile_wrapper.writeline(f"async_compile.cutedsl({kernel_name!r}, r'''")
compile_wrapper.splice(src_code_str, strip=True)
compile_wrapper.writeline("''')")
metadata_comment = f"# kernel path: {kernel_path}"
origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)
metadata_comment += "\n" + origins + "\n" + detailed_origins
wrapper.define_kernel(
kernel_name, compile_wrapper.getvalue(), metadata_comment
)
return kernel_name
|
Produce the kernel string
Args:
src_code_str: The finalized kernel code string
node_schedule: List of nodes in the schedule
Note:
This is a little weird since async_compile.cutedsl() has to write the string to
a file in order to cute compile it. Feels bad to have two...
|
python
|
torch/_inductor/codegen/cutedsl/cutedsl_scheduling.py
| 59
|
[
"self",
"src_code_str",
"node_schedule"
] |
str
| true
| 6
| 7.04
|
pytorch/pytorch
| 96,034
|
google
| false
|
releaseActiveTasks
|
private CompletableFuture<Void> releaseActiveTasks() {
if (memberEpoch > 0) {
return revokeActiveTasks(toTaskIdSet(currentAssignment.activeTasks));
} else {
return releaseLostActiveTasks();
}
}
|
Leaves the group.
<p>
This method does the following:
<ol>
<li>Transitions member state to {@link MemberState#PREPARE_LEAVING}.</li>
<li>Requests the invocation of the revocation callback or lost callback.</li>
<li>Once the callback completes, it clears the current and target assignment, unsubscribes from
all topics and transitions the member state to {@link MemberState#LEAVING}.</li>
</ol>
States {@link MemberState#PREPARE_LEAVING} and {@link MemberState#LEAVING} cause the heartbeat request manager
to send a leave group heartbeat.
</p>
@return future that will complete when the revocation callback execution completes and the heartbeat
to leave the group has been sent out.
|
java
|
clients/src/main/java/org/apache/kafka/clients/consumer/internals/StreamsMembershipManager.java
| 950
|
[] | true
| 2
| 7.6
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
processPendingNotifications
|
void processPendingNotifications() {
RemovalNotification<K, V> notification;
while ((notification = removalNotificationQueue.poll()) != null) {
try {
removalListener.onRemoval(notification);
} catch (Throwable e) {
logger.log(Level.WARNING, "Exception thrown by removal listener", e);
}
}
}
|
Notifies listeners that an entry has been automatically removed due to expiration, eviction, or
eligibility for garbage collection. This should be called every time expireEntries or
evictEntry is called (once the lock is released).
|
java
|
android/guava/src/com/google/common/cache/LocalCache.java
| 1,833
|
[] |
void
| true
| 3
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
read
|
@Override
public long read(ByteBuffer[] dsts) throws IOException {
return socketChannel.read(dsts);
}
|
Reads a sequence of bytes from this channel into the given buffers.
@param dsts - The buffers into which bytes are to be transferred.
@return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream.
@throws IOException if some other I/O error occurs
|
java
|
clients/src/main/java/org/apache/kafka/common/network/PlaintextTransportLayer.java
| 112
|
[
"dsts"
] | true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
|
resolveContextualObject
|
default @Nullable Object resolveContextualObject(String key) {
return null;
}
|
Resolve the contextual object for the given key, if any.
For example, the HttpServletRequest object for key "request".
<p>Since 7.0, this interface method returns {@code null} by default.
@param key the contextual key
@return the corresponding object, or {@code null} if none found
@throws IllegalStateException if the underlying scope is not currently active
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/config/Scope.java
| 134
|
[
"key"
] |
Object
| true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
dtype_to_arrow_c_fmt
|
def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str:
"""
Represent pandas `dtype` as a format string in Apache Arrow C notation.
Parameters
----------
dtype : np.dtype
Datatype of pandas DataFrame to represent.
Returns
-------
str
Format string in Apache Arrow C notation of the given `dtype`.
"""
if isinstance(dtype, CategoricalDtype):
return ArrowCTypes.INT64
elif dtype == np.dtype("O"):
return ArrowCTypes.STRING
elif isinstance(dtype, ArrowDtype):
import pyarrow as pa
pa_type = dtype.pyarrow_dtype
if pa.types.is_decimal(pa_type):
return f"d:{pa_type.precision},{pa_type.scale}"
elif pa.types.is_timestamp(pa_type) and pa_type.tz is not None:
return f"ts{pa_type.unit[0]}:{pa_type.tz}"
format_str = PYARROW_CTYPES.get(str(pa_type), None)
if format_str is not None:
return format_str
format_str = getattr(ArrowCTypes, dtype.name.upper(), None)
if format_str is not None:
return format_str
if isinstance(dtype, pd.StringDtype):
# TODO(infer_string) this should be LARGE_STRING for pyarrow storage,
# but current tests don't cover this distinction
return ArrowCTypes.STRING
elif lib.is_np_dtype(dtype, "M"):
# Selecting the first char of resolution string:
# dtype.str -> '<M8[ns]' -> 'n'
resolution = np.datetime_data(dtype)[0][0]
return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz="")
elif isinstance(dtype, DatetimeTZDtype):
return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz)
elif isinstance(dtype, pd.BooleanDtype):
return ArrowCTypes.BOOL
raise NotImplementedError(
f"Conversion of {dtype} to Arrow C format string is not implemented."
)
|
Represent pandas `dtype` as a format string in Apache Arrow C notation.
Parameters
----------
dtype : np.dtype
Datatype of pandas DataFrame to represent.
Returns
-------
str
Format string in Apache Arrow C notation of the given `dtype`.
|
python
|
pandas/core/interchange/utils.py
| 104
|
[
"dtype"
] |
str
| true
| 13
| 6.88
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
compareTo
|
@Override
public int compareTo(final Pair<L, R> other) {
// @formatter:off
return new CompareToBuilder()
.append(getLeft(), other.getLeft())
.append(getRight(), other.getRight())
.toComparison();
// @formatter:on
}
|
Compares the pair based on the left element followed by the right element.
The types must be {@link Comparable}.
@param other the other pair, not null.
@return negative if this is less, zero if equal, positive if greater.
|
java
|
src/main/java/org/apache/commons/lang3/tuple/Pair.java
| 151
|
[
"other"
] | true
| 1
| 7.2
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
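A brief sketch of the ordering defined by compareTo above: left elements first, then right elements. The values are illustrative.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.commons.lang3.tuple.Pair;

public class PairSortDemo {
    public static void main(String[] args) {
        List<Pair<String, Integer>> pairs = new ArrayList<>();
        pairs.add(Pair.of("b", 1));
        pairs.add(Pair.of("a", 2));
        pairs.add(Pair.of("a", 1));
        // Natural ordering compares the left element, then the right element.
        Collections.sort(pairs);
        System.out.println(pairs); // [(a,1), (a,2), (b,1)]
    }
}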
wait_for_task_status
|
def wait_for_task_status(self, replication_task_arn: str, status: DmsTaskWaiterStatus):
"""
Wait for replication task to reach status; supported statuses: deleted, ready, running, stopped.
:param status: Status to wait for
:param replication_task_arn: Replication task ARN
"""
if not isinstance(status, DmsTaskWaiterStatus):
raise TypeError("Status must be an instance of DmsTaskWaiterStatus")
dms_client = self.get_conn()
waiter = dms_client.get_waiter(f"replication_task_{status.value}")
waiter.wait(
Filters=[
{
"Name": "replication-task-arn",
"Values": [
replication_task_arn,
],
},
],
WithoutSettings=True,
)
|
Wait for replication task to reach status; supported statuses: deleted, ready, running, stopped.
:param status: Status to wait for
:param replication_task_arn: Replication task ARN
|
python
|
providers/amazon/src/airflow/providers/amazon/aws/hooks/dms.py
| 203
|
[
"self",
"replication_task_arn",
"status"
] | true
| 2
| 6.4
|
apache/airflow
| 43,597
|
sphinx
| false
|
|
incrementalAlterConfigs
|
default AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs) {
return incrementalAlterConfigs(configs, new AlterConfigsOptions());
}
|
Incrementally updates the configuration for the specified resources with default options.
<p>
This is a convenience method for {@link #incrementalAlterConfigs(Map, AlterConfigsOptions)} with default options.
See the overload for more details.
<p>
This operation is supported by brokers with version 2.3.0 or higher.
@param configs The resources with their configs
@return The AlterConfigsResult
|
java
|
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
| 501
|
[
"configs"
] |
AlterConfigsResult
| true
| 1
| 6.16
|
apache/kafka
| 31,560
|
javadoc
| false
|
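A hedged usage sketch of the convenience overload above. The bootstrap address, topic name, and retention value are placeholders; the types used (Admin, AlterConfigOp, ConfigEntry, ConfigResource) are the standard Kafka admin client API.

import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class IncrementalAlterConfigsDemo {
    public static void main(String[] args) throws Exception {
        // "localhost:9092" and "demo-topic" are placeholder values.
        try (Admin admin = Admin.create(
                Map.<String, Object>of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"))) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "demo-topic");
            Collection<AlterConfigOp> ops = List.of(
                    new AlterConfigOp(new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET));
            // Convenience overload shown above: applies default AlterConfigsOptions.
            admin.incrementalAlterConfigs(Map.of(topic, ops)).all().get();
        }
    }
}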
toString
|
@Override
public String toString() {
if (this.resource instanceof FileSystemResource || this.resource instanceof FileUrlResource) {
try {
return "file [" + this.resource.getFile() + "]";
}
catch (IOException ex) {
// Ignore
}
}
return this.resource.toString();
}
|
Return the profile or {@code null} if the resource is not profile specific.
@return the profile or {@code null}
@since 2.4.6
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/StandardConfigDataResource.java
| 120
|
[] |
String
| true
| 4
| 8.4
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
buildLazyResourceProxy
|
protected Object buildLazyResourceProxy(LookupElement element, @Nullable String requestingBeanName) {
TargetSource ts = new TargetSource() {
@Override
public Class<?> getTargetClass() {
return element.lookupType;
}
@Override
public Object getTarget() {
return getResource(element, requestingBeanName);
}
};
ProxyFactory pf = new ProxyFactory();
pf.setTargetSource(ts);
if (element.lookupType.isInterface()) {
pf.addInterface(element.lookupType);
}
ClassLoader classLoader = (this.beanFactory instanceof ConfigurableBeanFactory configurableBeanFactory ?
configurableBeanFactory.getBeanClassLoader() : null);
return pf.getProxy(classLoader);
}
|
Obtain a lazily resolving resource proxy for the given name and type,
delegating to {@link #getResource} on demand once a method call comes in.
@param element the descriptor for the annotated field/method
@param requestingBeanName the name of the requesting bean
@return the resource object (never {@code null})
@since 4.2
@see #getResource
@see Lazy
|
java
|
spring-context/src/main/java/org/springframework/context/annotation/CommonAnnotationBeanPostProcessor.java
| 475
|
[
"element",
"requestingBeanName"
] |
Object
| true
| 3
| 7.76
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
get
|
public static SslBundle get(PemSslBundleProperties properties, ResourceLoader resourceLoader) {
PemSslStore keyStore = getPemSslStore("keystore", properties.getKeystore(), resourceLoader);
if (keyStore != null) {
keyStore = keyStore.withAlias(properties.getKey().getAlias())
.withPassword(properties.getKey().getPassword());
}
PemSslStore trustStore = getPemSslStore("truststore", properties.getTruststore(), resourceLoader);
SslStoreBundle storeBundle = new PemSslStoreBundle(keyStore, trustStore);
return new PropertiesSslBundle(storeBundle, properties);
}
|
Get an {@link SslBundle} for the given {@link PemSslBundleProperties}.
@param properties the source properties
@param resourceLoader the resource loader used to load content
@return an {@link SslBundle} instance
@since 3.3.5
|
java
|
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/ssl/PropertiesSslBundle.java
| 118
|
[
"properties",
"resourceLoader"
] |
SslBundle
| true
| 2
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
postReadCleanup
|
void postReadCleanup() {
if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
runCleanup();
}
}
|
Performs routine cleanup following a read. Normally cleanup happens during writes, or from
the cleanupExecutor. If cleanup is not observed after a sufficient number of reads, try
cleaning up from the read thread.
|
java
|
android/guava/src/com/google/common/collect/MapMakerInternalMap.java
| 1,988
|
[] |
void
| true
| 2
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
convertValue
|
protected Object convertValue(Object value) {
return value;
}
|
Hook to convert each encountered Map value.
The default implementation simply returns the passed-in value as-is.
<p>Can be overridden to perform conversion of certain values,
for example from String to Integer.
<p>Only called if actually creating a new Map!
This is by default not the case if the type of the passed-in Map
already matches. Override {@link #alwaysCreateNewMap()} to
enforce creating a new Map in every case.
@param value the source value
@return the value to be used in the target Map
@see #alwaysCreateNewMap
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/CustomMapEditor.java
| 190
|
[
"value"
] |
Object
| true
| 1
| 6.64
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
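A sketch of the String-to-Integer conversion the javadoc above suggests, via a hypothetical CustomMapEditor subclass (IntegerValueMapEditor is an invented name).

import java.util.LinkedHashMap;
import org.springframework.beans.propertyeditors.CustomMapEditor;

public class IntegerValueMapEditor extends CustomMapEditor {

    public IntegerValueMapEditor() {
        // Target a concrete Map implementation so a fresh map can always be created.
        super(LinkedHashMap.class);
    }

    @Override
    protected boolean alwaysCreateNewMap() {
        // Force creation of a new Map so convertValue is always invoked.
        return true;
    }

    @Override
    protected Object convertValue(Object value) {
        // Convert String entries to Integer; pass other values through untouched.
        return (value instanceof String s ? Integer.valueOf(s) : value);
    }
}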
desugarUserArgs
|
function desugarUserArgs(args: UserArgs = {}) {
const { select, ..._args } = args // exclude select
if (typeof select === 'object') {
return desugarUserArgsAggregate({ ..._args, _count: select })
} else {
return desugarUserArgsAggregate({ ..._args, _count: { _all: true } })
}
}
|
Transforms the `userArgs` for the `.count` shorthand. It is API sugar. It
reuses the logic from the `.aggregate` shorthand to add additional handling.
The goal here is to desugar it into something that is understood by the QE.
@param args to transform
@returns
|
typescript
|
packages/client/src/runtime/core/model/aggregates/count.ts
| 12
|
[
"args"
] | false
| 3
| 7.28
|
prisma/prisma
| 44,834
|
jsdoc
| false
|
|
can_hold_element
|
def can_hold_element(arr: ArrayLike, element: Any) -> bool:
"""
Can we do an inplace setitem with this element in an array with this dtype?
Parameters
----------
arr : np.ndarray or ExtensionArray
element : Any
Returns
-------
bool
"""
dtype = arr.dtype
if not isinstance(dtype, np.dtype) or dtype.kind in "mM":
if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)):
# np.dtype here catches datetime64ns and timedelta64ns; we assume
# in this case that we have DatetimeArray/TimedeltaArray
arr = cast(
"PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr
)
try:
arr._validate_setitem_value(element)
return True
except (ValueError, TypeError):
return False
if dtype == "string":
try:
arr._maybe_convert_setitem_value(element) # type: ignore[union-attr]
return True
except (ValueError, TypeError):
return False
# This is technically incorrect, but maintains the behavior of
# ExtensionBlock._can_hold_element
return True
try:
np_can_hold_element(dtype, element)
return True
except (TypeError, LossySetitemError):
return False
|
Can we do an inplace setitem with this element in an array with this dtype?
Parameters
----------
arr : np.ndarray or ExtensionArray
element : Any
Returns
-------
bool
|
python
|
pandas/core/dtypes/cast.py
| 1,610
|
[
"arr",
"element"
] |
bool
| true
| 5
| 6.72
|
pandas-dev/pandas
| 47,362
|
numpy
| false
|
parse_uid
|
def parse_uid(uid):
"""Parse user id.
Arguments:
uid (str, int): Actual uid, or the username of a user.
Returns:
int: The actual uid.
"""
try:
return int(uid)
except ValueError:
try:
return pwd.getpwnam(uid).pw_uid
except (AttributeError, KeyError):
raise KeyError(f'User does not exist: {uid}')
|
Parse user id.
Arguments:
uid (str, int): Actual uid, or the username of a user.
Returns:
int: The actual uid.
|
python
|
celery/platforms.py
| 432
|
[
"uid"
] | false
| 1
| 6.08
|
celery/celery
| 27,741
|
google
| false
|
|
initializePermission
|
function initializePermission() {
const permission = getOptionValue('--permission');
if (permission) {
process.binding = function binding(_module) {
throw new ERR_ACCESS_DENIED('process.binding');
};
// Guarantee path module isn't monkey-patched to bypass permission model
ObjectFreeze(require('path'));
const { has } = require('internal/process/permission');
const warnFlags = [
'--allow-addons',
'--allow-child-process',
'--allow-inspector',
'--allow-wasi',
'--allow-worker',
];
for (const flag of warnFlags) {
if (getOptionValue(flag)) {
process.emitWarning(
`The flag ${flag} must be used with extreme caution. ` +
'It could invalidate the permission model.', 'SecurityWarning');
}
}
const warnCommaFlags = [
'--allow-fs-read',
'--allow-fs-write',
];
for (const flag of warnCommaFlags) {
const value = getOptionValue(flag);
if (value.length === 1 && value[0].includes(',')) {
process.emitWarning(
`The ${flag} CLI flag has changed. ` +
'Passing a comma-separated list of paths is no longer valid. ' +
'Documentation can be found at ' +
'https://nodejs.org/api/permissions.html#file-system-permissions',
'Warning',
);
}
}
const experimentalWarnFlags = [
'--allow-net',
];
for (const flag of experimentalWarnFlags) {
if (getOptionValue(flag)) {
process.emitWarning(
`The flag ${flag} is under experimental phase.`,
'ExperimentalWarning');
}
}
ObjectDefineProperty(process, 'permission', {
__proto__: null,
enumerable: true,
configurable: false,
value: {
has,
},
});
} else {
const { availableFlags } = require('internal/process/permission');
ArrayPrototypeForEach(availableFlags(), (flag) => {
const value = getOptionValue(flag);
if (value.length) {
throw new ERR_MISSING_OPTION('--permission');
}
});
}
}
|
Patch the process object with legacy properties and normalizations.
Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`.
Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found.
@param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of
the main entry point.
@returns {string}
|
javascript
|
lib/internal/process/pre_execution.js
| 604
|
[] | false
| 8
| 6.88
|
nodejs/node
| 114,839
|
jsdoc
| false
|
|
toString
|
@Override
public String toString() {
return getClass().getName() + ": " + this.nameMap;
}
|
Return if the given method name matches the mapped name.
<p>The default implementation checks for "xxx*", "*xxx" and "*xxx*" matches,
as well as direct equality. Can be overridden in subclasses.
@param methodName the method name of the class
@param mappedName the name in the descriptor
@return if the names match
@see org.springframework.util.PatternMatchUtils#simpleMatch(String, String)
|
java
|
spring-context/src/main/java/org/springframework/cache/interceptor/NameMatchCacheOperationSource.java
| 122
|
[] |
String
| true
| 1
| 6.32
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
fromDataObjectBinders
|
private @Nullable Object fromDataObjectBinders(@Nullable BindMethod bindMethod,
Function<DataObjectBinder, @Nullable Object> operation) {
List<DataObjectBinder> dataObjectBinders = this.dataObjectBinders.get(bindMethod);
Assert.state(dataObjectBinders != null, "'dataObjectBinders' must not be null");
for (DataObjectBinder dataObjectBinder : dataObjectBinders) {
Object bound = operation.apply(dataObjectBinder);
if (bound != null) {
return bound;
}
}
return null;
}
|
Bind the specified target {@link Bindable} using this binder's
{@link ConfigurationPropertySource property sources} or create a new instance using
the type of the {@link Bindable} if the result of the binding is {@code null}.
@param name the configuration property name to bind
@param target the target bindable
@param handler the bind handler (may be {@code null})
@param <T> the bound or created type
@return the bound or created object
@since 2.2.0
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Binder.java
| 512
|
[
"bindMethod",
"operation"
] |
Object
| true
| 2
| 7.92
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
parseExpression
|
function parseExpression(): Expression {
// Expression[in]:
// AssignmentExpression[in]
// Expression[in] , AssignmentExpression[in]
// clear the decorator context when parsing Expression, as it should be unambiguous when parsing a decorator
const saveDecoratorContext = inDecoratorContext();
if (saveDecoratorContext) {
setDecoratorContext(/*val*/ false);
}
const pos = getNodePos();
let expr = parseAssignmentExpressionOrHigher(/*allowReturnTypeInArrowFunction*/ true);
let operatorToken: BinaryOperatorToken;
while ((operatorToken = parseOptionalToken(SyntaxKind.CommaToken))) {
expr = makeBinaryExpression(expr, operatorToken, parseAssignmentExpressionOrHigher(/*allowReturnTypeInArrowFunction*/ true), pos);
}
if (saveDecoratorContext) {
setDecoratorContext(/*val*/ true);
}
return expr;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 5,041
|
[] | true
| 4
| 6.88
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
render_gemm_arguments
|
def render_gemm_arguments(
self,
argument_template: str,
epilogue_template: str,
should_swap_xw: bool,
X: IRNode,
W: IRNode,
Bias: IRNode,
Y: IRNode,
alpha: float,
beta: float,
kernel: CUDATemplateKernel,
epilogue_args,
) -> str:
"""
Render the Cutlass CUDA C++ code required for passing arguments to the GEMM operation.
Args:
argument_template (str): Template for the GEMM operation arguments.
epilogue_template (str): Template for the epilogue arguments.
should_swap_xw (bool): Determines whether X, W operands should be swapped. If True, applies an explicit
transpose operation to X and W.
X (IRNode): The X input tensor.
W (IRNode): The W input tensor.
Bias (IRNode): The bias tensor.
Y (IRNode): The output tensor.
alpha (float): Scaling factor for the product of the inputs.
beta (float): Scaling factor for the output tensor.
kernel (CUDATemplateKernel): CUDA Template kernel for the operation.
epilogue_args (any): Additional arguments for the epilogue state.
Returns:
str: A block of CUDA C++ code as a string, ready to be used as arguments for the GEMM operation.
Note: If `should_swap_xw` is True, a transpose operation will be applied to the X, W, Bias, and Y
tensors. This operation also implies the M and N dimensions of Bias and GEMM output to be swapped
before the function call.
"""
options = {
"alpha": alpha,
"beta": beta,
"X": X,
"W": W,
"Y": Y,
"Bias": Bias,
"template": self,
"kernel": kernel,
"M": "M",
"N": "N",
"epilogue_args": epilogue_args,
}
assert epilogue_template is not None
if should_swap_xw:
# Swap
def clone_with_transposed_stride(node: IRNode) -> IRNode:
old_layout = node.get_layout()
new_stride = list(old_layout.stride) # type: ignore[union-attr]
new_stride[-2], new_stride[-1] = new_stride[-1], new_stride[-2]
assert old_layout.device is not None
new_layout = FixedLayout(
old_layout.device,
old_layout.dtype,
list(old_layout.size), # type: ignore[union-attr]
new_stride,
old_layout.offset, # type: ignore[union-attr]
)
return Buffer(name=node.get_name(), layout=new_layout)
new_X = clone_with_transposed_stride(X)
new_W = clone_with_transposed_stride(W)
new_Bias = clone_with_transposed_stride(Bias)
new_Y = clone_with_transposed_stride(Y)
options["X"], options["W"], options["Bias"], options["Y"] = (
new_W,
new_X,
new_Bias,
new_Y,
)
options["M"], options["N"] = "N", "M"
epilogue_arguments = self._template_from_string(epilogue_template).render(
**options
)
arguments = self._template_from_string(argument_template).render(
epilogue_arguments=epilogue_arguments, **options
)
return arguments
|
Render the Cutlass CUDA C++ code required for passing arguments to the GEMM operation.
Args:
argument_template (str): Template for the GEMM operation arguments.
epilogue_template (str): Template for the epilogue arguments.
should_swap_xw (bool): Determines whether X, W operands should be swapped. If True, applies an explicit
transpose operation to X and W.
X (IRNode): The X input tensor.
W (IRNode): The W input tensor.
Bias (IRNode): The bias tensor.
Y (IRNode): The output tensor.
alpha (float): Scaling factor for the product of the inputs.
beta (float): Scaling factor for the output tensor.
kernel (CUDATemplateKernel): CUDA Template kernel for the operation.
epilogue_args (any): Additional arguments for the epilogue state.
Returns:
str: A block of CUDA C++ code as a string, ready to be used as arguments for the GEMM operation.
Note: If `should_swap_xw` is True, a transpose operation will be applied to the X, W, Bias, and Y
tensors. This operation also implies the M and N dimensions of Bias and GEMM output to be swapped
before the function call.
|
python
|
torch/_inductor/codegen/cuda/gemm_template.py
| 1,608
|
[
"self",
"argument_template",
"epilogue_template",
"should_swap_xw",
"X",
"W",
"Bias",
"Y",
"alpha",
"beta",
"kernel",
"epilogue_args"
] |
str
| true
| 2
| 8
|
pytorch/pytorch
| 96,034
|
google
| false
|
of
|
@SuppressWarnings("unchecked")
static <S, T> ValueExtractor<T> of(Extractor<S, T> extractor) {
return (instance) -> {
if (instance == null) {
return null;
}
return (skip(instance)) ? (T) SKIP : extractor.extract((S) instance);
};
}
|
Create a new {@link ValueExtractor} based on the given {@link Function}.
@param <S> the source type
@param <T> the extracted type
@param extractor the extractor to use
@return a new {@link ValueExtractor} instance
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
| 746
|
[
"extractor"
] | true
| 3
| 7.76
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
keySet
|
@Override
public Set<K> keySet() {
Set<K> result = keySet;
return (result == null) ? keySet = new KeySet() : result;
}
|
Specifies the delegate maps going in each direction. Called by subclasses during
deserialization.
|
java
|
android/guava/src/com/google/common/collect/AbstractBiMap.java
| 208
|
[] | true
| 2
| 6.4
|
google/guava
| 51,352
|
javadoc
| false
|
|
toString
|
@Override
public String toString() {
return new ToStringCreator(this).append("beanName", getBeanName())
.append("mergedBeanDefinition", getMergedBeanDefinition()).toString();
}
|
Resolve an autowired argument.
@param descriptor the descriptor for the dependency (field/method/constructor)
@param typeConverter the TypeConverter to use for populating arrays and collections
@param autowiredBeanNames a Set that all names of autowired beans (used for
resolving the given dependency) are supposed to be added to
@return the resolved object, or {@code null} if none found
@since 6.0.9
|
java
|
spring-beans/src/main/java/org/springframework/beans/factory/support/RegisteredBean.java
| 256
|
[] |
String
| true
| 1
| 6.24
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
equalsIgnoreCase
|
@Deprecated
public static boolean equalsIgnoreCase(final CharSequence cs1, final CharSequence cs2) {
return Strings.CI.equals(cs1, cs2);
}
|
Compares two CharSequences, returning {@code true} if they represent equal sequences of characters, ignoring case.
<p>
{@code null}s are handled without exceptions. Two {@code null} references are considered equal. The comparison is <strong>case insensitive</strong>.
</p>
<pre>
StringUtils.equalsIgnoreCase(null, null) = true
StringUtils.equalsIgnoreCase(null, "abc") = false
StringUtils.equalsIgnoreCase("abc", null) = false
StringUtils.equalsIgnoreCase("abc", "abc") = true
StringUtils.equalsIgnoreCase("abc", "ABC") = true
</pre>
@param cs1 the first CharSequence, may be {@code null}.
@param cs2 the second CharSequence, may be {@code null}.
@return {@code true} if the CharSequences are equal (case-insensitive), or both {@code null}.
@since 3.0 Changed signature from equalsIgnoreCase(String, String) to equalsIgnoreCase(CharSequence, CharSequence)
@see #equals(CharSequence, CharSequence)
@deprecated Use {@link Strings#equals(CharSequence, CharSequence) Strings.CI.equals(CharSequence, CharSequence)}.
|
java
|
src/main/java/org/apache/commons/lang3/StringUtils.java
| 1,864
|
[
"cs1",
"cs2"
] | true
| 1
| 6.32
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
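A short sketch contrasting the deprecated call with the Strings.CI.equals replacement named in the deprecation note; it assumes a commons-lang3 version that ships org.apache.commons.lang3.Strings.

import org.apache.commons.lang3.Strings;
import org.apache.commons.lang3.StringUtils;

public class EqualsIgnoreCaseDemo {
    public static void main(String[] args) {
        // The deprecated form and its replacement behave identically.
        System.out.println(StringUtils.equalsIgnoreCase("abc", "ABC")); // true
        System.out.println(Strings.CI.equals("abc", "ABC"));            // true
        System.out.println(Strings.CI.equals(null, null));              // true
        System.out.println(Strings.CI.equals("abc", null));             // false
    }
}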
strictIndexOf
|
function strictIndexOf(array, value, fromIndex) {
var index = fromIndex - 1,
length = array.length;
while (++index < length) {
if (array[index] === value) {
return index;
}
}
return -1;
}
|
A specialized version of `_.indexOf` which performs strict equality
comparisons of values, i.e. `===`.
@private
@param {Array} array The array to inspect.
@param {*} value The value to search for.
@param {number} fromIndex The index to search from.
@returns {number} Returns the index of the matched value, else `-1`.
|
javascript
|
lodash.js
| 1,298
|
[
"array",
"value",
"fromIndex"
] | false
| 3
| 6.08
|
lodash/lodash
| 61,490
|
jsdoc
| false
|
|
muteState
|
public ChannelMuteState muteState() {
return muteState;
}
|
Unmute the channel. The channel can be unmuted only if it is in the MUTED state. For other muted states
(MUTED_AND_*), this is a no-op.
@return Whether or not the channel is in the NOT_MUTED state after the call
|
java
|
clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java
| 314
|
[] |
ChannelMuteState
| true
| 1
| 6.96
|
apache/kafka
| 31,560
|
javadoc
| false
|
getSpringBootExceptionHandler
|
@Nullable SpringBootExceptionHandler getSpringBootExceptionHandler() {
if (isMainThread(Thread.currentThread())) {
return SpringBootExceptionHandler.forCurrentThread();
}
return null;
}
|
Register that the given exception has been logged. By default, if the running in
the main thread, this method will suppress additional printing of the stacktrace.
@param exception the exception that was logged
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/SpringApplication.java
| 922
|
[] |
SpringBootExceptionHandler
| true
| 2
| 7.04
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
packDibitImpl
|
public static void packDibitImpl(int[] vector, byte[] packed) {
int limit = vector.length - 7;
int i = 0;
int index = 0;
for (; i < limit; i += 8, index++) {
assert vector[i] >= 0 && vector[i] <= 3;
assert vector[i + 1] >= 0 && vector[i + 1] <= 3;
assert vector[i + 2] >= 0 && vector[i + 2] <= 3;
assert vector[i + 3] >= 0 && vector[i + 3] <= 3;
assert vector[i + 4] >= 0 && vector[i + 4] <= 3;
assert vector[i + 5] >= 0 && vector[i + 5] <= 3;
assert vector[i + 6] >= 0 && vector[i + 6] <= 3;
assert vector[i + 7] >= 0 && vector[i + 7] <= 3;
int lowerByte = (vector[i] & 1) << 7 | (vector[i + 1] & 1) << 6 | (vector[i + 2] & 1) << 5 | (vector[i + 3] & 1) << 4
| (vector[i + 4] & 1) << 3 | (vector[i + 5] & 1) << 2 | (vector[i + 6] & 1) << 1 | (vector[i + 7] & 1);
int upperByte = ((vector[i] >> 1) & 1) << 7 | ((vector[i + 1] >> 1) & 1) << 6 | ((vector[i + 2] >> 1) & 1) << 5 | ((vector[i
+ 3] >> 1) & 1) << 4 | ((vector[i + 4] >> 1) & 1) << 3 | ((vector[i + 5] >> 1) & 1) << 2 | ((vector[i + 6] >> 1) & 1) << 1
| ((vector[i + 7] >> 1) & 1);
packed[index] = (byte) lowerByte;
packed[index + packed.length / 2] = (byte) upperByte;
}
if (i == vector.length) {
return;
}
int lowerByte = 0;
int upperByte = 0;
for (int j = 7; i < vector.length; j--, i++) {
assert vector[i] >= 0 && vector[i] <= 3;
lowerByte |= (vector[i] & 1) << j;
upperByte |= ((vector[i] >> 1) & 1) << j;
}
packed[index] = (byte) lowerByte;
packed[index + packed.length / 2] = (byte) upperByte;
}
|
Packs a two-bit vector (values 0-3) into a byte array, with the lower bits first.
The striding is similar to transposeHalfByte.
@param vector the input vector with values 0-3
@param packed the output packed byte array
|
java
|
libs/simdvec/src/main/java/org/elasticsearch/simdvec/internal/vectorization/DefaultESVectorUtilSupport.java
| 341
|
[
"vector",
"packed"
] |
void
| true
| 13
| 6.72
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
toString
|
@Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(getResourceDescription(this.resource));
if (this.location != null) {
result.append(" - ").append(this.location);
}
return result.toString();
}
|
Return the location of the property within the source (if known).
@return the location or {@code null}
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/origin/TextResourceOrigin.java
| 95
|
[] |
String
| true
| 2
| 8.24
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
getRemoteParentContext
|
private Context getRemoteParentContext(TraceContext traceContext) {
// https://github.com/open-telemetry/opentelemetry-java/discussions/2884#discussioncomment-381870
// If you just want to propagate across threads within the same process, you don't need context propagators (extract/inject).
// You can just pass the Context object directly to another thread (it is immutable and thus thread-safe).
final String traceParentHeader = traceContext.getTransient(Task.PARENT_TRACE_PARENT_HEADER);
final String traceStateHeader = traceContext.getTransient(Task.PARENT_TRACE_STATE);
if (traceParentHeader != null) {
final Map<String, String> traceContextMap = Maps.newMapWithExpectedSize(2);
// traceparent and tracestate should match the keys used by W3CTraceContextPropagator
traceContextMap.put(Task.TRACE_PARENT_HTTP_HEADER, traceParentHeader);
if (traceStateHeader != null) {
traceContextMap.put(Task.TRACE_STATE, traceStateHeader);
}
return services.openTelemetry.getPropagators()
.getTextMapPropagator()
.extract(Context.current(), traceContextMap, new MapKeyGetter());
}
return null;
}
|
Called when a span starts. This version of the method relies on context to assign the span a parent.
@param name the name of the span. Sent to the tracing system
@param attributes
|
java
|
modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java
| 249
|
[
"traceContext"
] |
Context
| true
| 3
| 6.88
|
elastic/elasticsearch
| 75,680
|
javadoc
| false
|
load
|
public abstract V load(K key) throws Exception;
|
Computes or retrieves the value corresponding to {@code key}.
@param key the non-null key whose value should be loaded
@return the value associated with {@code key}; <b>must not be null</b>
@throws Exception if unable to load the result
@throws InterruptedException if this method is interrupted. {@code InterruptedException} is
treated like any other {@code Exception} in all respects except that, when it is caught,
the thread's interrupt status is set
|
java
|
android/guava/src/com/google/common/cache/CacheLoader.java
| 73
|
[
"key"
] |
V
| true
| 1
| 6.32
|
google/guava
| 51,352
|
javadoc
| false
|
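A minimal sketch of a CacheLoader wired into a LoadingCache through CacheBuilder; the key type and the computation inside load() are illustrative.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class CacheLoaderDemo {
    public static void main(String[] args) {
        LoadingCache<String, Integer> lengths = CacheBuilder.newBuilder()
                .maximumSize(100)
                .build(new CacheLoader<String, Integer>() {
                    @Override
                    public Integer load(String key) {
                        // Computed on a cache miss; must not return null.
                        return key.length();
                    }
                });
        System.out.println(lengths.getUnchecked("kafka")); // 5, computed via load()
        System.out.println(lengths.getUnchecked("kafka")); // 5, served from the cache
    }
}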
inject_url_defaults
|
def inject_url_defaults(self, endpoint: str, values: dict[str, t.Any]) -> None:
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
names: t.Iterable[str | None] = (None,)
# url_for may be called outside a request context, parse the
# passed endpoint instead of using request.blueprints.
if "." in endpoint:
names = chain(
names, reversed(_split_blueprint_path(endpoint.rpartition(".")[0]))
)
for name in names:
if name in self.url_default_functions:
for func in self.url_default_functions[name]:
func(endpoint, values)
|
Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
|
python
|
src/flask/sansio/app.py
| 953
|
[
"self",
"endpoint",
"values"
] |
None
| true
| 5
| 6
|
pallets/flask
| 70,946
|
unknown
| false
|
isUnambiguouslyStartOfFunctionType
|
function isUnambiguouslyStartOfFunctionType() {
nextToken();
if (token() === SyntaxKind.CloseParenToken || token() === SyntaxKind.DotDotDotToken) {
// ( )
// ( ...
return true;
}
if (skipParameterStart()) {
// We successfully skipped modifiers (if any) and an identifier or binding pattern,
// now see if we have something that indicates a parameter declaration
if (
token() === SyntaxKind.ColonToken || token() === SyntaxKind.CommaToken ||
token() === SyntaxKind.QuestionToken || token() === SyntaxKind.EqualsToken
) {
// ( xxx :
// ( xxx ,
// ( xxx ?
// ( xxx =
return true;
}
if (token() === SyntaxKind.CloseParenToken) {
nextToken();
if (token() === SyntaxKind.EqualsGreaterThanToken) {
// ( xxx ) =>
return true;
}
}
}
return false;
}
|
Reports a diagnostic error for the current token being an invalid name.
@param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName).
@param nameDiagnostic Diagnostic to report for all other cases.
@param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
|
typescript
|
src/compiler/parser.ts
| 4,881
|
[] | false
| 10
| 6.08
|
microsoft/TypeScript
| 107,154
|
jsdoc
| false
|
|
pendingToString
|
@Override
protected @Nullable String pendingToString() {
@RetainedLocalRef ListenableFuture<? extends V> localInputFuture = delegateRef;
@RetainedLocalRef ScheduledFuture<?> localTimer = timer;
if (localInputFuture != null) {
String message = "inputFuture=[" + localInputFuture + "]";
if (localTimer != null) {
long delay = localTimer.getDelay(MILLISECONDS);
// Negative delays look confusing in an error message
if (delay > 0) {
message += ", remaining delay=[" + delay + " ms]";
}
}
return message;
}
return null;
}
|
A runnable that is called when the delegate or the timer completes.
|
java
|
android/guava/src/com/google/common/util/concurrent/TimeoutFuture.java
| 161
|
[] |
String
| true
| 4
| 7.04
|
google/guava
| 51,352
|
javadoc
| false
|
expectedCacheHitRatio
|
double expectedCacheHitRatio(
const BinaryFunctionListType &BinaryFunctions,
const std::unordered_map<BinaryBasicBlock *, uint64_t> &BBAddr,
const std::unordered_map<BinaryBasicBlock *, uint64_t> &BBSize) {
std::unordered_map<const BinaryFunction *, Predecessors> Calls =
extractFunctionCalls(BinaryFunctions);
// Compute 'hotness' of the functions
double TotalSamples = 0;
std::unordered_map<BinaryFunction *, double> FunctionSamples;
for (BinaryFunction *BF : BinaryFunctions) {
double Samples = 0;
for (std::pair<BinaryFunction *, uint64_t> Pair : Calls[BF])
Samples += Pair.second;
Samples = std::max(Samples, (double)BF->getKnownExecutionCount());
FunctionSamples[BF] = Samples;
TotalSamples += Samples;
}
// Compute 'hotness' of the pages
std::unordered_map<uint64_t, double> PageSamples;
for (BinaryFunction *BF : BinaryFunctions) {
if (BF->getLayout().block_empty())
continue;
auto BBAddrIt = BBAddr.find(BF->getLayout().block_front());
assert(BBAddrIt != BBAddr.end());
const uint64_t Page = BBAddrIt->second / ITLBPageSize;
auto FunctionSamplesIt = FunctionSamples.find(BF);
assert(FunctionSamplesIt != FunctionSamples.end());
PageSamples[Page] += FunctionSamplesIt->second;
}
// Computing the expected number of misses for every function
double Misses = 0;
for (BinaryFunction *BF : BinaryFunctions) {
// Skip the function if it has no samples
auto FunctionSamplesIt = FunctionSamples.find(BF);
assert(FunctionSamplesIt != FunctionSamples.end());
double Samples = FunctionSamplesIt->second;
if (BF->getLayout().block_empty() || Samples == 0.0)
continue;
auto BBAddrIt = BBAddr.find(BF->getLayout().block_front());
assert(BBAddrIt != BBAddr.end());
const uint64_t Page = BBAddrIt->second / ITLBPageSize;
// The probability that the page is not present in the cache
const double MissProb =
pow(1.0 - PageSamples[Page] / TotalSamples, ITLBEntries);
// Processing all callers of the function
for (std::pair<BinaryFunction *, uint64_t> Pair : Calls[BF]) {
BinaryFunction *SrcFunction = Pair.first;
BBAddrIt = BBAddr.find(SrcFunction->getLayout().block_front());
assert(BBAddrIt != BBAddr.end());
const uint64_t SrcPage = BBAddrIt->second / ITLBPageSize;
// Is this a 'long' or a 'short' call?
if (Page != SrcPage) {
// This is a miss
Misses += MissProb * Pair.second;
}
Samples -= Pair.second;
}
assert(Samples >= 0.0 && "Function samples computed incorrectly");
// The remaining samples likely come from the jitted code
Misses += Samples * MissProb;
}
return 100.0 * (1.0 - Misses / TotalSamples);
}
|
estimates the expected number of cache misses for the long ones.
|
cpp
|
bolt/lib/Passes/CacheMetrics.cpp
| 142
|
[] | true
| 6
| 7.12
|
llvm/llvm-project
| 36,021
|
doxygen
| false
|
|
loadProperties
|
protected Properties loadProperties(Resource resource, String filename) throws IOException {
Properties props = newProperties();
try (InputStream is = resource.getInputStream()) {
String resourceFilename = resource.getFilename();
if (resourceFilename != null && resourceFilename.endsWith(XML_EXTENSION)) {
if (logger.isDebugEnabled()) {
logger.debug("Loading properties [" + resource.getFilename() + "]");
}
this.propertiesPersister.loadFromXml(props, is);
}
else {
String encoding = null;
if (this.fileEncodings != null) {
encoding = this.fileEncodings.getProperty(filename);
}
if (encoding == null) {
encoding = getDefaultEncoding();
}
if (encoding != null) {
if (logger.isDebugEnabled()) {
logger.debug("Loading properties [" + resource.getFilename() + "] with encoding '" + encoding + "'");
}
this.propertiesPersister.load(props, new InputStreamReader(is, encoding));
}
else {
if (logger.isDebugEnabled()) {
logger.debug("Loading properties [" + resource.getFilename() + "]");
}
this.propertiesPersister.load(props, is);
}
}
return props;
}
}
|
Load the properties from the given resource.
@param resource the resource to load from
@param filename the original bundle filename (basename + Locale)
@return the populated Properties instance
@throws IOException if properties loading failed
|
java
|
spring-context/src/main/java/org/springframework/context/support/ReloadableResourceBundleMessageSource.java
| 559
|
[
"resource",
"filename"
] |
Properties
| true
| 9
| 7.6
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
stringOrNull
|
private static @Nullable String stringOrNull(char @Nullable [] in) {
return (in == null) ? null : new String(in);
}
|
Returns a string that would replace the given character in the specified escaper, or {@code
null} if no replacement should be made. This method is intended for use in tests through the
{@code EscaperAsserts} class; production users of {@link UnicodeEscaper} should limit
themselves to its public interface.
@param cp the Unicode code point to escape if necessary
@return the replacement string, or {@code null} if no escaping was needed
|
java
|
android/guava/src/com/google/common/escape/Escapers.java
| 188
|
[
"in"
] |
String
| true
| 2
| 7.68
|
google/guava
| 51,352
|
javadoc
| false
|
ABSL_LOCKS_EXCLUDED
|
ABSL_LOCKS_EXCLUDED(stream_mu_) {
ServerCallbackReaderWriter<Request, Response>* stream =
stream_.load(std::memory_order_acquire);
if (stream == nullptr) {
grpc::internal::MutexLock l(&stream_mu_);
stream = stream_.load(std::memory_order_relaxed);
if (stream == nullptr) {
backlog_.write_wanted = resp;
backlog_.write_options_wanted = options;
return;
}
}
stream->Write(resp, options);
}
|
\param[in] options The WriteOptions to use for writing this message
|
cpp
|
include/grpcpp/support/server_callback.h
| 338
|
[] | true
| 3
| 6.24
|
grpc/grpc
| 44,113
|
doxygen
| false
|
|
columnKeySet
|
@Override
public ImmutableSet<C> columnKeySet() {
return columnKeyToIndex.keySet();
}
|
Returns an immutable set of the valid column keys, including those that are associated with
null values only.
@return immutable set of column keys
|
java
|
android/guava/src/com/google/common/collect/ArrayTable.java
| 635
|
[] | true
| 1
| 6.8
|
google/guava
| 51,352
|
javadoc
| false
|
|
getLoader
|
@SuppressWarnings("unchecked")
private <R extends ConfigDataResource> ConfigDataLoader<R> getLoader(ConfigDataLoaderContext context, R resource) {
ConfigDataLoader<R> result = null;
for (int i = 0; i < this.loaders.size(); i++) {
ConfigDataLoader<R> candidate = this.loaders.get(i);
if (this.resourceTypes.get(i).isInstance(resource)) {
if (candidate.isLoadable(context, resource)) {
if (result != null) {
throw new IllegalStateException("Multiple loaders found for resource '" + resource + "' ["
+ candidate.getClass().getName() + "," + result.getClass().getName() + "]");
}
result = candidate;
}
}
}
Assert.state(result != null, () -> "No loader found for resource '" + resource + "'");
return result;
}
|
Load {@link ConfigData} using the first appropriate {@link ConfigDataLoader}.
@param <R> the resource type
@param context the loader context
@param resource the resource to load
@return the loaded {@link ConfigData}
@throws IOException on IO error
|
java
|
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataLoaders.java
| 103
|
[
"context",
"resource"
] | true
| 5
| 7.44
|
spring-projects/spring-boot
| 79,428
|
javadoc
| false
|
|
escape
|
@Override
public final String escape(String s) {
checkNotNull(s); // GWT specific check (do not optimize).
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
if ((c < replacementsLength && replacements[c] != null) || c > safeMax || c < safeMin) {
return escapeSlow(s, i);
}
}
return s;
}
|
Creates a new ArrayBasedCharEscaper instance with the given replacement map and specified safe
range. If {@code safeMax < safeMin} then no characters are considered safe. This initializer is
useful when explicit instances of ArrayBasedEscaperMap are used to allow the sharing of large
replacement mappings.
<p>If a character has no mapped replacement then it is checked against the safe range. If it
lies outside that, then {@link #escapeUnsafe} is called, otherwise no escaping is performed.
@param escaperMap the mapping of characters to be escaped
@param safeMin the lowest character value in the safe range
@param safeMax the highest character value in the safe range
|
java
|
android/guava/src/com/google/common/escape/ArrayBasedCharEscaper.java
| 103
|
[
"s"
] |
String
| true
| 6
| 6.72
|
google/guava
| 51,352
|
javadoc
| false
|
toBoolean
|
public static boolean toBoolean(final int value) {
return value != 0;
}
|
Converts an int to a boolean using the convention that {@code zero}
is {@code false}, everything else is {@code true}.
<pre>
BooleanUtils.toBoolean(0) = false
BooleanUtils.toBoolean(1) = true
BooleanUtils.toBoolean(2) = true
</pre>
@param value the int to convert
@return {@code true} if non-zero, {@code false}
if zero
|
java
|
src/main/java/org/apache/commons/lang3/BooleanUtils.java
| 413
|
[
"value"
] | true
| 1
| 6.8
|
apache/commons-lang
| 2,896
|
javadoc
| false
|
|
cancelInFlightRequests
|
private void cancelInFlightRequests(String nodeId,
long now,
Collection<ClientResponse> responses,
boolean timedOut) {
Iterable<InFlightRequest> inFlightRequests = this.inFlightRequests.clearAll(nodeId);
for (InFlightRequest request : inFlightRequests) {
if (log.isDebugEnabled()) {
log.debug("Cancelled in-flight {} request with correlation id {} due to node {} being disconnected " +
"(elapsed time since creation: {}ms, elapsed time since send: {}ms, throttle time: {}ms, request timeout: {}ms): {}",
request.header.apiKey(), request.header.correlationId(), nodeId,
request.timeElapsedSinceCreateMs(now), request.timeElapsedSinceSendMs(now),
request.throttleTimeMs(), request.requestTimeoutMs, request.request);
} else {
log.info("Cancelled in-flight {} request with correlation id {} due to node {} being disconnected " +
"(elapsed time since creation: {}ms, elapsed time since send: {}ms, throttle time: {}ms, request timeout: {}ms)",
request.header.apiKey(), request.header.correlationId(), nodeId,
request.timeElapsedSinceCreateMs(now), request.timeElapsedSinceSendMs(now),
request.throttleTimeMs(), request.requestTimeoutMs);
}
if (!request.isInternalRequest) {
if (responses != null) {
ClientResponse clientResponse;
if (timedOut)
clientResponse = request.timedOut(now);
else
clientResponse = request.disconnected(now);
responses.add(clientResponse);
}
} else if (request.header.apiKey() == ApiKeys.METADATA) {
metadataUpdater.handleFailedRequest(now, Optional.empty());
} else if (isTelemetryApi(request.header.apiKey()) && telemetrySender != null) {
telemetrySender.handleFailedRequest(request.header.apiKey(), null);
}
}
}
|
Disconnects the connection to a particular node, if there is one.
Any pending ClientRequests for this connection will receive disconnections.
@param nodeId The id of the node
|
java
|
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
| 398
|
[
"nodeId",
"now",
"responses",
"timedOut"
] |
void
| true
| 8
| 6.56
|
apache/kafka
| 31,560
|
javadoc
| false
|
itemsize
|
def itemsize(self) -> int:
"""
Return the number of bytes in this dtype.
For Arrow-backed dtypes:
- Returns the fixed-width bit size divided by 8 for standard fixed-width types.
- For boolean types, returns the NumPy itemsize.
- Falls back to the NumPy dtype itemsize for variable-width & unsupported types.
Examples
--------
>>> import pyarrow as pa
>>> import pandas as pd
>>> dtype = pd.ArrowDtype(pa.int32())
>>> dtype.itemsize
4
>>> dtype = pd.ArrowDtype(pa.bool_())
>>> dtype.itemsize # falls back to numpy dtype
1
"""
if pa.types.is_boolean(self.pyarrow_dtype):
return self.numpy_dtype.itemsize
# Use pyarrow itemsize for fixed-width data types
# e.g. int32 -> 32 bits // 8 = 4 bytes
try:
return self.pyarrow_dtype.bit_width // 8
except (ValueError, AttributeError, NotImplementedError):
return self.numpy_dtype.itemsize
|
Return the number of bytes in this dtype.
For Arrow-backed dtypes:
- Returns the fixed-width bit size divided by 8 for standard fixed-width types.
- For boolean types, returns the NumPy itemsize.
- Falls back to the NumPy dtype itemsize for variable-width & unsupported types.
Examples
--------
>>> import pyarrow as pa
>>> import pandas as pd
>>> dtype = pd.ArrowDtype(pa.int32())
>>> dtype.itemsize
4
>>> dtype = pd.ArrowDtype(pa.bool_())
>>> dtype.itemsize # falls back to numpy dtype
1
|
python
|
pandas/core/dtypes/dtypes.py
| 2,313
|
[
"self"
] |
int
| true
| 2
| 7.44
|
pandas-dev/pandas
| 47,362
|
unknown
| false
|
createURI
|
protected URI createURI(String value) throws URISyntaxException {
int colonIndex = value.indexOf(':');
if (this.encode && colonIndex != -1) {
int fragmentIndex = value.indexOf('#', colonIndex + 1);
String scheme = value.substring(0, colonIndex);
String ssp = value.substring(colonIndex + 1, (fragmentIndex > 0 ? fragmentIndex : value.length()));
String fragment = (fragmentIndex > 0 ? value.substring(fragmentIndex + 1) : null);
return new URI(scheme, ssp, fragment);
}
else {
// not encoding or the value contains no scheme - fallback to default
return new URI(value);
}
}
|
Create a URI instance for the given user-specified String value.
<p>The default implementation encodes the value into an RFC-2396 compliant URI.
@param value the value to convert into a URI instance
@return the URI instance
@throws java.net.URISyntaxException if URI conversion failed
|
java
|
spring-beans/src/main/java/org/springframework/beans/propertyeditors/URIEditor.java
| 138
|
[
"value"
] |
URI
| true
| 5
| 7.92
|
spring-projects/spring-framework
| 59,386
|
javadoc
| false
|
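The createURI entry above is normally reached through the PropertyEditor contract rather than called directly. A small usage sketch, assuming Spring's URIEditor(boolean encode) constructor and an illustrative URI value:

import java.net.URI;
import org.springframework.beans.propertyeditors.URIEditor;

public class UriEditorDemo {
    public static void main(String[] args) {
        URIEditor editor = new URIEditor(true); // encode the scheme-specific part
        editor.setAsText("https://example.com/docs/hello world#top");
        URI uri = (URI) editor.getValue();
        // The space is percent-encoded because createURI() splits scheme, ssp and fragment
        // and rebuilds the value through the encoding URI(String, String, String) constructor.
        System.out.println(uri); // https://example.com/docs/hello%20world#top
    }
}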