field             type      reported range
function_name     string    length 1-57
function_code     string    length 20-4.99k
documentation     string    length 50-2k
language          string    5 classes
file_path         string    length 8-166
line_number       int32     4-16.7k
parameters        list      length 0-20
return_type       string    length 0-131
has_type_hints    bool      2 classes
complexity        int32     1-51
quality_score     float32   6-9.68
repo_name         string    34 classes
repo_stars        int32     2.9k-242k
docstring_style   string    7 classes
is_async          bool      2 classes

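The rows below follow this schema. As a hedged sketch, records like these can be loaded and filtered with the Hugging Face datasets library; the dataset ID used here is a hypothetical placeholder, not the real repository name:

    from datasets import load_dataset

    # NOTE: "user/code-documentation" is a hypothetical placeholder ID.
    ds = load_dataset("user/code-documentation", split="train")

    # Keep high-quality, type-hinted Python rows.
    subset = ds.filter(
        lambda row: row["language"] == "python"
        and row["has_type_hints"]
        and row["quality_score"] >= 7.0
    )
    print(subset[0]["function_name"], subset[0]["file_path"])
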
function_name: validate_bool_kwarg
function_code:

    def validate_bool_kwarg(
        value: BoolishNoneT,
        arg_name: str,
        none_allowed: bool = True,
        int_allowed: bool = False,
    ) -> BoolishNoneT:
        """
        Ensure that argument passed in arg_name can be interpreted as boolean.

        Parameters
        ----------
        value : bool
            Value to be validated.
        arg_name : str
            Name of the argument. To be reflected in the error message.
        none_allowed : bool, default True
            Whether to consider None to be a valid boolean.
        int_allowed : bool, default False
            Whether to consider integer value to be a valid boolean.

        Returns
        -------
        value
            The same value as input.

        Raises
        ------
        ValueError
            If the value is not a valid boolean.
        """
        good_value = is_bool(value)
        if none_allowed:
            good_value = good_value or (value is None)
        if int_allowed:
            good_value = good_value or isinstance(value, int)
        if not good_value:
            raise ValueError(
                f'For argument "{arg_name}" expected type bool, received '
                f"type {type(value).__name__}."
            )
        return value

language: python
file_path: pandas/util/_validators.py
line_number: 228
parameters: ["value", "arg_name", "none_allowed", "int_allowed"]
return_type: BoolishNoneT
has_type_hints: true
complexity: 6
quality_score: 6.72
repo_name: pandas-dev/pandas
repo_stars: 47362
docstring_style: numpy
is_async: false

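A quick demonstration of how this validator behaves. `is_bool` and `BoolishNoneT` are pandas internals, so this standalone sketch substitutes `isinstance(value, bool)` for `is_bool`:

    def is_bool(value):
        # Stand-in for pandas' internal is_bool helper.
        return isinstance(value, bool)

    def validate_bool_kwarg(value, arg_name, none_allowed=True, int_allowed=False):
        good_value = is_bool(value)
        if none_allowed:
            good_value = good_value or (value is None)
        if int_allowed:
            good_value = good_value or isinstance(value, int)
        if not good_value:
            raise ValueError(
                f'For argument "{arg_name}" expected type bool, received '
                f"type {type(value).__name__}."
            )
        return value

    print(validate_bool_kwarg(True, "copy"))   # True
    print(validate_bool_kwarg(None, "copy"))   # None (allowed by default)
    try:
        validate_bool_kwarg("yes", "copy")
    except ValueError as exc:
        print(exc)  # For argument "copy" expected type bool, received type str.
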
function_name: preinitialize
function_code:

    private void preinitialize() {
        Runner runner = new Runner(this.factoriesLoader.load(BackgroundPreinitializer.class));
        try {
            Thread thread = new Thread(runner, "background-preinit");
            thread.start();
        }
        catch (Exception ex) {
            // This will fail on Google App Engine where creating threads is
            // prohibited. We can safely continue but startup will be slightly slower
            // as the initialization will now happen on the main thread.
            complete.countDown();
        }
    }

documentation: System property that instructs Spring Boot how to run pre initialization. When the property is set to {@code true}, no pre-initialization happens and each item is initialized in the foreground as it needs to. When the property is {@code false} (default), pre initialization runs in a separate thread in the background.
language: java
file_path: core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/preinitialize/BackgroundPreinitializingApplicationListener.java
line_number: 98
parameters: []
return_type: void
has_type_hints: true
complexity: 2
quality_score: 7.2
repo_name: spring-projects/spring-boot
repo_stars: 79428
docstring_style: javadoc
is_async: false

function_name: itertuples
function_code:

    def itertuples(
        self, index: bool = True, name: str | None = "Pandas"
    ) -> Iterable[tuple[Any, ...]]:
        """
        Iterate over DataFrame rows as namedtuples.

        Parameters
        ----------
        index : bool, default True
            If True, return the index as the first element of the tuple.
        name : str or None, default "Pandas"
            The name of the returned namedtuples or None to return regular
            tuples.

        Returns
        -------
        iterator
            An object to iterate over namedtuples for each row in the
            DataFrame with the first field possibly being the index and
            following fields being the column values.

        See Also
        --------
        DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
            pairs.
        DataFrame.items : Iterate over (column name, Series) pairs.

        Notes
        -----
        The column names will be renamed to positional names if they are
        invalid Python identifiers, repeated, or start with an underscore.

        Examples
        --------
        >>> df = pd.DataFrame(
        ...     {"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"]
        ... )
        >>> df
              num_legs  num_wings
        dog          4          0
        hawk         2          2
        >>> for row in df.itertuples():
        ...     print(row)
        Pandas(Index='dog', num_legs=4, num_wings=0)
        Pandas(Index='hawk', num_legs=2, num_wings=2)

        By setting the `index` parameter to False we can remove the index
        as the first element of the tuple:

        >>> for row in df.itertuples(index=False):
        ...     print(row)
        Pandas(num_legs=4, num_wings=0)
        Pandas(num_legs=2, num_wings=2)

        With the `name` parameter set we set a custom name for the yielded
        namedtuples:

        >>> for row in df.itertuples(name="Animal"):
        ...     print(row)
        Animal(Index='dog', num_legs=4, num_wings=0)
        Animal(Index='hawk', num_legs=2, num_wings=2)
        """
        arrays = []
        fields = list(self.columns)
        if index:
            arrays.append(self.index)
            fields.insert(0, "Index")

        # use integer indexing because of possible duplicate column names
        arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))

        if name is not None:
            # https://github.com/python/mypy/issues/9046
            # error: namedtuple() expects a string literal as the first argument
            itertuple = collections.namedtuple(  # type: ignore[misc]
                name, fields, rename=True
            )
            return map(itertuple._make, zip(*arrays, strict=True))

        # fallback to regular tuples
        return zip(*arrays, strict=True)

language: python
file_path: pandas/core/frame.py
line_number: 1598
parameters: ["self", "index", "name"]
return_type: Iterable[tuple[Any, ...]]
has_type_hints: true
complexity: 3
quality_score: 8.4
repo_name: pandas-dev/pandas
repo_stars: 47362
docstring_style: numpy
is_async: false

function_name: max_cookie_size
function_code:

    def max_cookie_size(self) -> int:  # type: ignore
        """Read-only view of the :data:`MAX_COOKIE_SIZE` config key.

        See :attr:`~werkzeug.wrappers.Response.max_cookie_size` in
        Werkzeug's docs.
        """
        if current_app:
            return current_app.config["MAX_COOKIE_SIZE"]  # type: ignore[no-any-return]

        # return Werkzeug's default when not in an app context
        return super().max_cookie_size

language: python
file_path: src/flask/wrappers.py
line_number: 247
parameters: ["self"]
return_type: int
has_type_hints: true
complexity: 2
quality_score: 6.56
repo_name: pallets/flask
repo_stars: 70946
docstring_style: unknown
is_async: false

function_name: getCollectionElementType
function_code:

    private TypeMirror getCollectionElementType(TypeMirror type) {
        if (((TypeElement) this.types.asElement(type)).getQualifiedName()
                .contentEquals(Collection.class.getName())) {
            DeclaredType declaredType = (DeclaredType) type;
            // raw type, just "Collection"
            if (declaredType.getTypeArguments().isEmpty()) {
                return this.types.getDeclaredType(
                        this.env.getElementUtils().getTypeElement(Object.class.getName()));
            }
            // return type argument to Collection<...>
            return declaredType.getTypeArguments().get(0);
        }
        // recursively walk the supertypes, looking for Collection<...>
        for (TypeMirror superType : this.env.getTypeUtils().directSupertypes(type)) {
            if (this.types.isAssignable(superType, this.collectionType)) {
                return getCollectionElementType(superType);
            }
        }
        return null;
    }

documentation: Extract the target element type from the specified container type or {@code null} if no element type was found. @param type a type, potentially wrapping an element type @return the element type or {@code null} if no specific type was found
language: java
file_path: configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/TypeUtils.java
line_number: 154
parameters: ["type"]
return_type: TypeMirror
has_type_hints: true
complexity: 4
quality_score: 8.24
repo_name: spring-projects/spring-boot
repo_stars: 79428
docstring_style: javadoc
is_async: false

function_name: asarray
function_code:

    def asarray(a, dtype=None, order=None):
        """
        Convert the input to a masked array of the given data-type.

        No copy is performed if the input is already an `ndarray`. If `a` is
        a subclass of `MaskedArray`, a base class `MaskedArray` is returned.

        Parameters
        ----------
        a : array_like
            Input data, in any form that can be converted to a masked array. This
            includes lists, lists of tuples, tuples, tuples of tuples, tuples
            of lists, ndarrays and masked arrays.
        dtype : dtype, optional
            By default, the data-type is inferred from the input data.
        order : {'C', 'F'}, optional
            Whether to use row-major ('C') or column-major ('FORTRAN') memory
            representation. Default is 'C'.

        Returns
        -------
        out : MaskedArray
            Masked array interpretation of `a`.

        See Also
        --------
        asanyarray : Similar to `asarray`, but conserves subclasses.

        Examples
        --------
        >>> import numpy as np
        >>> x = np.arange(10.).reshape(2, 5)
        >>> x
        array([[0., 1., 2., 3., 4.],
               [5., 6., 7., 8., 9.]])
        >>> np.ma.asarray(x)
        masked_array(
          data=[[0., 1., 2., 3., 4.],
                [5., 6., 7., 8., 9.]],
          mask=False,
          fill_value=1e+20)
        >>> type(np.ma.asarray(x))
        <class 'numpy.ma.MaskedArray'>
        """
        order = order or 'C'
        return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
                            subok=False, order=order)

language: python
file_path: numpy/ma/core.py
line_number: 8555
parameters: ["a", "dtype", "order"]
has_type_hints: false
complexity: 2
quality_score: 7.68
repo_name: numpy/numpy
repo_stars: 31054
docstring_style: numpy
is_async: false

function_name: _maybe_adjust_name
function_code:

    def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:
        """
        Prior to 0.10.1, we named values blocks like: values_block_0 and the
        name values_0, adjust the given name if necessary.

        Parameters
        ----------
        name : str
        version : Tuple[int, int, int]

        Returns
        -------
        str
        """
        if isinstance(version, str) or len(version) < 3:
            raise ValueError("Version is incorrect, expected sequence of 3 integers.")

        if version[0] == 0 and version[1] <= 10 and version[2] == 0:
            m = re.search(r"values_block_(\d+)", name)
            if m:
                grp = m.groups()[0]
                name = f"values_{grp}"

        return name

language: python
file_path: pandas/io/pytables.py
line_number: 5391
parameters: ["name", "version"]
return_type: str
has_type_hints: true
complexity: 7
quality_score: 7.04
repo_name: pandas-dev/pandas
repo_stars: 47362
docstring_style: numpy
is_async: false

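Since the function above depends only on `re`, its behavior is easy to demonstrate standalone; the calls below reuse the definition exactly as shown in the record:

    import re

    def _maybe_adjust_name(name, version):
        if isinstance(version, str) or len(version) < 3:
            raise ValueError("Version is incorrect, expected sequence of 3 integers.")
        if version[0] == 0 and version[1] <= 10 and version[2] == 0:
            m = re.search(r"values_block_(\d+)", name)
            if m:
                name = f"values_{m.groups()[0]}"
        return name

    print(_maybe_adjust_name("values_block_2", (0, 10, 0)))  # values_2 (legacy naming)
    print(_maybe_adjust_name("values_block_2", (0, 10, 1)))  # values_block_2 (unchanged)
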
function_name: dot
function_code:

    def dot(lhs: Any, rhs: Any, sum_dims: Any) -> Union[_Tensor, torch.Tensor]:
        """
        Perform dot product between two tensors along specified dimensions.

        Args:
            lhs: Left-hand side tensor
            rhs: Right-hand side tensor
            sum_dims: Dimensions to sum over (contract)

        Returns:
            Result of dot product
        """
        # Get tensor info
        lhs_info = TensorInfo.create(lhs, ensure_batched=False, ensure_present=False)
        rhs_info = TensorInfo.create(rhs, ensure_batched=False, ensure_present=False)

        if not (lhs_info and rhs_info):
            # Fall back to regular operations
            return torch.matmul(lhs, rhs)

        assert lhs_info.tensor is not None and rhs_info.tensor is not None, (
            "Cannot perform dot product on None tensors"
        )

        lhs_strides = lhs_info.tensor.stride()
        rhs_strides = rhs_info.tensor.stride()

        # Create dot parts for different dimension categories
        lro_dims = DotPart()  # Left-right-output (batch dims)
        lo_dims = DotPart()  # Left-output only
        ro_dims = DotPart()  # Right-output only
        lr_dims = DotPart()  # Left-right (contracted dims)

        def insert_dim(d: Any, lhs_idx: Any, rhs_idx: Any) -> None:
            """Insert dimension into appropriate part based on stride pattern."""
            reduced = d in sum_dims
            lhs_stride = lhs_strides[lhs_idx] if lhs_idx is not None else 0
            rhs_stride = rhs_strides[rhs_idx] if rhs_idx is not None else 0
            if reduced:
                lr_dims.append(d)
            else:
                if (lhs_stride == 0) == (rhs_stride == 0):
                    lro_dims.append(d)  # Both have or both lack this dim
                elif lhs_stride != 0:
                    lo_dims.append(d)  # Only lhs has this dim
                else:
                    ro_dims.append(d)  # Only rhs has this dim

        # Track which rhs dimensions we've seen
        rhs_seen = [False] * len(rhs_info.levels)

        # Process lhs dimensions
        for i, lhs_level in enumerate(lhs_info.levels):
            rhs_idx = None
            for j, rhs_level in enumerate(rhs_info.levels):
                if lhs_level == rhs_level:
                    rhs_idx = j
                    rhs_seen[j] = True
                    break
            insert_dim(lhs_level, i, rhs_idx)

        # Process remaining rhs dimensions
        for i, rhs_level in enumerate(rhs_info.levels):
            if not rhs_seen[i]:
                insert_dim(rhs_level, None, i)

        # Validate sum dimensions exist
        if len(lr_dims.dims) != len(sum_dims):
            for d in sum_dims:
                if d not in lhs_info.levels and d not in rhs_info.levels:
                    raise ValueError(f"summing over non-existent dimension {d}")

        # Prepare tensors and perform matrix multiplication
        if len(lro_dims.dims) != 0:
            # Batched matrix multiply
            lhs_tensor = dot_prepare([lro_dims, lo_dims, lr_dims], lhs_info)
            rhs_tensor = dot_prepare([lro_dims, lr_dims, ro_dims], rhs_info)
            result = torch.bmm(lhs_tensor, rhs_tensor)
            return dot_finish([lro_dims, lo_dims, ro_dims], result)
        else:
            # Regular matrix multiply
            lhs_tensor = dot_prepare([lo_dims, lr_dims], lhs_info)
            rhs_tensor = dot_prepare([lr_dims, ro_dims], rhs_info)
            result = torch.mm(lhs_tensor, rhs_tensor)
            return dot_finish([lo_dims, ro_dims], result)

language: python
file_path: functorch/dim/__init__.py
line_number: 1399
parameters: ["lhs", "rhs", "sum_dims"]
return_type: Union[_Tensor, torch.Tensor]
has_type_hints: true
complexity: 22
quality_score: 6.32
repo_name: pytorch/pytorch
repo_stars: 96034
docstring_style: google
is_async: false

function_name: heartbeatIntervalForResponse
function_code:

    public abstract long heartbeatIntervalForResponse(R response);

documentation: Returns the heartbeat interval for the response. @param response The heartbeat response @return The heartbeat interval
language: java
file_path: clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java
line_number: 523
parameters: ["response"]
has_type_hints: true
complexity: 1
quality_score: 6.32
repo_name: apache/kafka
repo_stars: 31560
docstring_style: javadoc
is_async: false

function_name: _discover_hooks_from_connection_types
function_code:

    def _discover_hooks_from_connection_types(
        self,
        hook_class_names_registered: set[str],
        already_registered_warning_connection_types: set[str],
        package_name: str,
        provider: ProviderInfo,
    ):
        """
        Discover hooks from the "connection-types" property.

        This is new, better method that replaces discovery from hook-class-names
        as it allows to lazy import individual Hook classes when they are accessed.
        The "connection-types" keeps information about both - connection type and class
        name so we can discover all connection-types without importing the classes.

        :param hook_class_names_registered: set of registered hook class names for this provider
        :param already_registered_warning_connection_types: set of connections for which warning
            should be printed in logs as they were already registered before
        :param package_name:
        :param provider:
        :return:
        """
        provider_uses_connection_types = False
        connection_types = provider.data.get("connection-types")
        if connection_types:
            for connection_type_dict in connection_types:
                connection_type = connection_type_dict["connection-type"]
                hook_class_name = connection_type_dict["hook-class-name"]
                hook_class_names_registered.add(hook_class_name)
                already_registered = self._hook_provider_dict.get(connection_type)
                if already_registered:
                    if already_registered.package_name != package_name:
                        already_registered_warning_connection_types.add(connection_type)
                    else:
                        log.warning(
                            "The connection type '%s' is already registered in the"
                            " package '%s' with different class names: '%s' and '%s'. ",
                            connection_type,
                            package_name,
                            already_registered.hook_class_name,
                            hook_class_name,
                        )
                else:
                    self._hook_provider_dict[connection_type] = HookClassProvider(
                        hook_class_name=hook_class_name, package_name=package_name
                    )
                    # Defer importing hook to access time by setting import hook method as dict value
                    self._hooks_lazy_dict[connection_type] = functools.partial(
                        self._import_hook,
                        connection_type=connection_type,
                        provider_info=provider,
                    )
                provider_uses_connection_types = True
        return provider_uses_connection_types

language: python
file_path: airflow-core/src/airflow/providers_manager.py
line_number: 613
parameters: ["self", "hook_class_names_registered", "already_registered_warning_connection_types", "package_name", "provider"]
has_type_hints: true
complexity: 7
quality_score: 7.52
repo_name: apache/airflow
repo_stars: 43597
docstring_style: sphinx
is_async: false

function_name: newTreeMap
function_code:

    @SuppressWarnings("NonApiType") // acts as a direct substitute for a constructor call
    public static <K extends @Nullable Object, V extends @Nullable Object> TreeMap<K, V> newTreeMap(
        SortedMap<K, ? extends V> map) {
      return new TreeMap<>(map);
    }

documentation: Creates a <i>mutable</i> {@code TreeMap} instance with the same mappings as the specified map and using the same ordering as the specified map. <p><b>Note:</b> if mutability is not required, use {@link ImmutableSortedMap#copyOfSorted(SortedMap)} instead. <p><b>Note:</b> this method is now unnecessary and should be treated as deprecated. Instead, use the {@code TreeMap} constructor directly, taking advantage of <a href="https://docs.oracle.com/javase/tutorial/java/generics/genTypeInference.html#type-inference-instantiation">"diamond" syntax</a>. @param map the sorted map whose mappings are to be placed in the new map and whose comparator is to be used to sort the new map @return a new {@code TreeMap} initialized with the mappings from {@code map} and using the comparator of {@code map}
language: java
file_path: android/guava/src/com/google/common/collect/Maps.java
line_number: 384
parameters: ["map"]
has_type_hints: true
complexity: 1
quality_score: 6.24
repo_name: google/guava
repo_stars: 51352
docstring_style: javadoc
is_async: false

function_name: is_number
function_code:

    def is_number(obj: object) -> TypeGuard[Number | np.number]:
        """
        Check if the object is a number.

        Returns True when the object is a number, and False if is not.

        Parameters
        ----------
        obj : any type
            The object to check if is a number.

        Returns
        -------
        bool
            Whether `obj` is a number or not.

        See Also
        --------
        api.types.is_integer: Checks a subgroup of numbers.

        Examples
        --------
        >>> from pandas.api.types import is_number
        >>> is_number(1)
        True
        >>> is_number(7.15)
        True

        Booleans are valid because they are int subclass.

        >>> is_number(False)
        True

        >>> is_number("foo")
        False
        >>> is_number("5")
        False
        """
        return isinstance(obj, (Number, np.number))

language: python
file_path: pandas/core/dtypes/inference.py
line_number: 40
parameters: ["obj"]
return_type: TypeGuard[Number | np.number]
has_type_hints: true
complexity: 1
quality_score: 7.28
repo_name: pandas-dev/pandas
repo_stars: 47362
docstring_style: numpy
is_async: false

function_name: get
function_code:

    private static ResourceLoader get(ResourceLoader resourceLoader,
            SpringFactoriesLoader springFactoriesLoader, boolean preferFileResolution) {
        Assert.notNull(resourceLoader, "'resourceLoader' must not be null");
        Assert.notNull(springFactoriesLoader, "'springFactoriesLoader' must not be null");
        List<ProtocolResolver> protocolResolvers = springFactoriesLoader.load(ProtocolResolver.class);
        List<FilePathResolver> filePathResolvers = (preferFileResolution)
                ? springFactoriesLoader.load(FilePathResolver.class) : Collections.emptyList();
        return new ProtocolResolvingResourceLoader(resourceLoader, protocolResolvers, filePathResolvers);
    }

documentation: Return a {@link ResourceLoader} delegating to the given resource loader and supporting additional {@link ProtocolResolver ProtocolResolvers} registered in {@code spring.factories}. @param resourceLoader the delegate resource loader @param springFactoriesLoader the {@link SpringFactoriesLoader} used to load {@link ProtocolResolver ProtocolResolvers} @return a {@link ResourceLoader} instance @since 3.4.0
language: java
file_path: core/spring-boot/src/main/java/org/springframework/boot/io/ApplicationResourceLoader.java
line_number: 159
parameters: ["resourceLoader", "springFactoriesLoader", "preferFileResolution"]
return_type: ResourceLoader
has_type_hints: true
complexity: 2
quality_score: 7.28
repo_name: spring-projects/spring-boot
repo_stars: 79428
docstring_style: javadoc
is_async: false

function_name: withTimeout
function_code:

    public CloseOptions withTimeout(final Duration timeout) {
        this.timeout = Optional.ofNullable(timeout);
        return this;
    }

documentation: Fluent method to set the timeout for the close process. @param timeout the maximum time to wait for the consumer to close. If {@code null}, the default timeout will be used. @return this {@code CloseOptions} instance.
language: java
file_path: clients/src/main/java/org/apache/kafka/clients/consumer/CloseOptions.java
line_number: 89
parameters: ["timeout"]
return_type: CloseOptions
has_type_hints: true
complexity: 1
quality_score: 6.64
repo_name: apache/kafka
repo_stars: 31560
docstring_style: javadoc
is_async: false

function_name: toString
function_code:

    @Override
    public String toString() {
        StringBuilder bld = new StringBuilder("TAGGED_FIELDS_TYPE_NAME(");
        String prefix = "";
        for (Map.Entry<Integer, Field> field : fields.entrySet()) {
            bld.append(prefix);
            prefix = ", ";
            bld.append(field.getKey()).append(" -> ").append(field.getValue().toString());
        }
        bld.append(")");
        return bld.toString();
    }

documentation: Create a new TaggedFields object with the given tags and fields. @param fields This is an array containing Integer tags followed by associated Field objects. @return The new {@link TaggedFields}
language: java
file_path: clients/src/main/java/org/apache/kafka/common/protocol/types/TaggedFields.java
line_number: 134
parameters: []
return_type: String
has_type_hints: true
complexity: 1
quality_score: 6.72
repo_name: apache/kafka
repo_stars: 31560
docstring_style: javadoc
is_async: false

function_name: isASCIINumber
function_code:

    function isASCIINumber (value) {
      if (value.length === 0) return false
      for (let i = 0; i < value.length; i++) {
        if (value.charCodeAt(i) < 0x30 || value.charCodeAt(i) > 0x39) return false
      }
      return true
    }

documentation: Checks if the given value is a base 10 digit. @param {string} value @returns {boolean}
language: javascript
file_path: deps/undici/src/lib/web/eventsource/util.js
line_number: 18
parameters: ["value"]
has_type_hints: false
complexity: 5
quality_score: 6.24
repo_name: nodejs/node
repo_stars: 114839
docstring_style: jsdoc
is_async: false

function_name: bucket_fsdp_all_gather
function_code:

    def bucket_fsdp_all_gather(
        gm: torch.fx.GraphModule,
        bucket_cap_mb_by_bucket_idx: Callable[[int], float] | None = None,
        mode: BucketMode = "default",
    ) -> None:
        """
        Bucketing pass for SimpleFSDP all_gather ops.

        Attributes:
            gm (torch.fx.GraphModule): Graph module of the graph.
            bucket_cap_mb_by_bucket_idx (Callable[[int], float] | None): callback function
                that takes in bucket id and returns size of a bucket in megabytes.
        """
        if bucket_cap_mb_by_bucket_idx is None:
            from torch._inductor.fx_passes.bucketing import (
                bucket_cap_mb_by_bucket_idx_default,
            )

            bucket_cap_mb_by_bucket_idx = bucket_cap_mb_by_bucket_idx_default
        assert bucket_cap_mb_by_bucket_idx is not None
        ag_buckets = bucket_all_gather_by_mb(
            gm,
            bucket_cap_mb_by_bucket_idx,
            filter_wait_node=is_fsdp_all_gather_wait,
        )
        if len(ag_buckets) == 0:
            return
        merge_all_gather(gm, ag_buckets, mode)

language: python
file_path: torch/_inductor/fx_passes/fsdp.py
line_number: 57
parameters: ["gm", "bucket_cap_mb_by_bucket_idx", "mode"]
return_type: None
has_type_hints: true
complexity: 3
quality_score: 6.4
repo_name: pytorch/pytorch
repo_stars: 96034
docstring_style: unknown
is_async: false

function_name: allow_in_pre_dispatch_graph
function_code:

    def allow_in_pre_dispatch_graph(func):
        """
        Experimental decorator that adds user function to export pre-dispatch graph.
        Note that we only support custom autograd function/subclass constructors today.
        To use this function:
        1. For subclasses:
            1. refer to instructions in mark_subclass_constructor_exportable_experimental
        2. Define apply method on your custom autograd function and apply this decorator.

        Example:

        class MyCoolCustomAutogradFunc(autograd.Function):
            @classmethod
            @torch._export.wrappers.allow_in_pre_dispatch_graph
            def apply(cls, *args, **kwargs):
                return super(MyCoolCustomAutogradFunc, cls).apply(*args, **kwargs)
        """
        if _is_init(func):
            return mark_subclass_constructor_exportable_experimental(func)

        if not (_is_init(func) or func.__name__ == "apply"):
            raise RuntimeError(
                f"torch._export.wrappers.allow_in_pre_dispatch_graph can only be applied on subclass tensor.__init_ "
                f"or custom_autograd_function.apply. "
                f"But, you are adding it on {func.__name__} which is not supported. "
                f"If __init__ doesn't exist on your subclass, please add it. Look at DTensor.__init__ implementation for example. "
                f"If you are adding it on custom autograd function, please add it on apply method. "
                f"If anything else, file an issue on github and we may consider extending our support. "
            )

        @wraps(func)
        def wrapper(*args, **kwargs):
            if not torch.compiler.is_exporting():
                return func(*args, **kwargs)
            if not inspect.isclass(args[0]):
                return func(*args, **kwargs)
            if not issubclass(args[0], torch.autograd.Function):
                return func(*args, **kwargs)

            from torch._ops import _get_dispatch_mode_pre_dispatch

            mode = _get_dispatch_mode_pre_dispatch(torch._C._TorchDispatchModeKey.PROXY)
            if mode is None:
                return func(*args, **kwargs)

            # Sometimes custom autograd functions can call into HOPs that don't have proxy impl
            # at PreDispatch level, so we just dispatch it below to get the concrete result.
            include_to_set = torch._C._dispatch_tls_local_include_set().remove(
                torch._C.DispatchKey.PreDispatch
            )
            exclude_to_set = (
                torch._C._dispatch_tls_local_exclude_set()
                | torch._C.DispatchKeySet(torch._C.DispatchKey.PreDispatch)
            )

            with torch._C._ForceDispatchKeyGuard(include_to_set, exclude_to_set):
                out = func(*args, **kwargs)

            assert mode.pre_dispatch, "Should only do this in predispatch"
            tracer = mode.tracer
            function_cls_name = f"{args[0].__module__}.{args[0].__qualname__}"
            graphable = ((function_cls_name, *args[1:]), kwargs)

            from torch.export.custom_ops import (
                _call_custom_autograd_function_in_pre_dispatch,
            )

            spec_name = "_".join(function_cls_name.split("."))
            call_spec_cache_key = type(
                _call_custom_autograd_function_in_pre_dispatch
            ).__name__.lower()
            _emit_flat_apply_call(
                tracer=tracer,
                spec_name=spec_name,
                const_target_for_apply=_call_custom_autograd_function_in_pre_dispatch,
                graphable_args=graphable,
                track_value=out,
                call_spec_cache_key=call_spec_cache_key,
            )
            return out

        return wrapper

language: python
file_path: torch/_export/wrappers.py
line_number: 254
parameters: ["func"]
has_type_hints: false
complexity: 8
quality_score: 6.96
repo_name: pytorch/pytorch
repo_stars: 96034
docstring_style: unknown
is_async: false

function_name: asConfigurationPropertyName
function_code:

    private ConfigurationPropertyName asConfigurationPropertyName(ConfigurationMetadataProperty metadataProperty) {
        return ConfigurationPropertyName.isValid(metadataProperty.getId())
                ? ConfigurationPropertyName.of(metadataProperty.getId())
                : ConfigurationPropertyName.adapt(metadataProperty.getId(), '.');
    }

documentation: Analyse the {@link ConfigurableEnvironment environment} and attempt to rename legacy properties if a replacement exists. @return a report of the migration
language: java
file_path: core/spring-boot-properties-migrator/src/main/java/org/springframework/boot/context/properties/migrator/PropertiesMigrationReporter.java
line_number: 134
parameters: ["metadataProperty"]
return_type: ConfigurationPropertyName
has_type_hints: true
complexity: 2
quality_score: 6.08
repo_name: spring-projects/spring-boot
repo_stars: 79428
docstring_style: javadoc
is_async: false

function_name: _validate_apply_axis_arg
function_code:

    def _validate_apply_axis_arg(
        arg: NDFrame | Sequence | np.ndarray,
        arg_name: str,
        dtype: Any | None,
        data: NDFrame,
    ) -> np.ndarray:
        """
        For the apply-type methods, ``axis=None`` creates ``data`` as DataFrame, and for
        ``axis=[1,0]`` it creates a Series. Where ``arg`` is expected as an element
        of some operator with ``data`` we must make sure that the two are compatible
        shapes, or raise.

        Parameters
        ----------
        arg : sequence, Series or DataFrame
            the user input arg
        arg_name : string
            name of the arg for use in error messages
        dtype : numpy dtype, optional
            forced numpy dtype if given
        data : Series or DataFrame
            underling subset of Styler data on which operations are performed

        Returns
        -------
        ndarray
        """
        dtype = {"dtype": dtype} if dtype else {}
        # raise if input is wrong for axis:
        if isinstance(arg, Series) and isinstance(data, DataFrame):
            raise ValueError(
                f"'{arg_name}' is a Series but underlying data for operations "
                f"is a DataFrame since 'axis=None'"
            )
        if isinstance(arg, DataFrame) and isinstance(data, Series):
            raise ValueError(
                f"'{arg_name}' is a DataFrame but underlying data for "
                f"operations is a Series with 'axis in [0,1]'"
            )
        if isinstance(arg, (Series, DataFrame)):
            # align indx / cols to data
            arg = arg.reindex_like(data).to_numpy(**dtype)
        else:
            arg = np.asarray(arg, **dtype)
            assert isinstance(arg, np.ndarray)  # mypy requirement
            if arg.shape != data.shape:  # check valid input
                raise ValueError(
                    f"supplied '{arg_name}' is not correct shape for data over "
                    f"selected 'axis': got {arg.shape}, "
                    f"expected {data.shape}"
                )
        return arg

language: python
file_path: pandas/io/formats/style.py
line_number: 3882
parameters: ["arg", "arg_name", "dtype", "data"]
return_type: np.ndarray
has_type_hints: true
complexity: 9
quality_score: 6.72
repo_name: pandas-dev/pandas
repo_stars: 47362
docstring_style: numpy
is_async: false

function_name: add
function_code:

    def add(self, *, caller, callee):
        """Add a method mapping.

        Parameters
        ----------
        caller : str
            Parent estimator's method name in which the ``callee`` is called.

        callee : str
            Child object's method name. This method is called in ``caller``.

        Returns
        -------
        self : MethodMapping
            Returns self.
        """
        if caller not in METHODS:
            raise ValueError(
                f"Given caller:{caller} is not a valid method. Valid methods are:"
                f" {METHODS}"
            )
        if callee not in METHODS:
            raise ValueError(
                f"Given callee:{callee} is not a valid method. Valid methods are:"
                f" {METHODS}"
            )
        self._routes.append(MethodPair(caller=caller, callee=callee))
        return self

language: python
file_path: sklearn/utils/_metadata_requests.py
line_number: 775
parameters: ["self", "caller", "callee"]
has_type_hints: false
complexity: 3
quality_score: 6.08
repo_name: scikit-learn/scikit-learn
repo_stars: 64340
docstring_style: numpy
is_async: false

function_name: hangup
function_code:

    public ExitStatus hangup() {
        return new ExitStatus(this.code, this.name, true);
    }

documentation: Convert the existing code to a hangup. @return a new ExitStatus with hangup=true
language: java
file_path: cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/command/status/ExitStatus.java
line_number: 95
parameters: []
return_type: ExitStatus
has_type_hints: true
complexity: 1
quality_score: 6.48
repo_name: spring-projects/spring-boot
repo_stars: 79428
docstring_style: javadoc
is_async: false

function_name: fnv32_BROKEN
function_code:

    constexpr uint32_t fnv32_BROKEN(
        const char* buf, uint32_t hash = fnv32_hash_start) noexcept {
      for (; *buf; ++buf) {
        hash = fnv32_append_byte_BROKEN(hash, static_cast<uint8_t>(*buf));
      }
      return hash;
    }

documentation: FNV hash of a c-str. Continues hashing until a null byte is reached. @param hash The initial hash seed. @methodset fnv
language: cpp
file_path: folly/hash/FnvHash.h
line_number: 101
parameters: []
has_type_hints: true
complexity: 2
quality_score: 7.04
repo_name: facebook/folly
repo_stars: 30157
docstring_style: doxygen
is_async: false

function_name: verifyFullFetchResponsePartitions
function_code:

    String verifyFullFetchResponsePartitions(Set<TopicPartition> topicPartitions, Set<Uuid> ids, short version) {
        StringBuilder bld = new StringBuilder();
        Set<TopicPartition> extra = findMissing(topicPartitions, sessionPartitions.keySet());
        Set<TopicPartition> omitted = findMissing(sessionPartitions.keySet(), topicPartitions);
        Set<Uuid> extraIds = new HashSet<>();
        if (version >= 13) {
            extraIds = findMissing(ids, sessionTopicNames.keySet());
        }
        if (!omitted.isEmpty()) {
            bld.append("omittedPartitions=(").append(omitted.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))).append("), ");
        }
        if (!extra.isEmpty()) {
            bld.append("extraPartitions=(").append(extra.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))).append("), ");
        }
        if (!extraIds.isEmpty()) {
            bld.append("extraIds=(").append(extraIds.stream().map(Uuid::toString).collect(Collectors.joining(", "))).append("), ");
        }
        if ((!omitted.isEmpty()) || (!extra.isEmpty()) || (!extraIds.isEmpty())) {
            bld.append("response=(").append(topicPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))).append(")");
            return bld.toString();
        }
        return null;
    }

documentation: Verify that a full fetch response contains all the partitions in the fetch session. @param topicPartitions The topicPartitions from the FetchResponse. @param ids The topic IDs from the FetchResponse. @param version The version of the FetchResponse. @return null if the full fetch response partitions are valid; human-readable problem description otherwise.
language: java
file_path: clients/src/main/java/org/apache/kafka/clients/FetchSessionHandler.java
line_number: 431
parameters: ["topicPartitions", "ids", "version"]
return_type: String
has_type_hints: true
complexity: 8
quality_score: 7.6
repo_name: apache/kafka
repo_stars: 31560
docstring_style: javadoc
is_async: false

function_name: throwableMembers
function_code:

    private static void throwableMembers(Members<ILoggingEvent> members, Extractor extractor) {
        members.add("full_message", extractor::messageAndStackTrace);
        members.add("_error_type", ILoggingEvent::getThrowableProxy).as(IThrowableProxy::getClassName);
        members.add("_error_stack_trace", extractor::stackTrace);
        members.add("_error_message", ILoggingEvent::getThrowableProxy).as(IThrowableProxy::getMessage);
    }

documentation: GELF requires "seconds since UNIX epoch with optional <b>decimal places for milliseconds</b>". To comply with this requirement, we format a POSIX timestamp with millisecond precision as e.g. "1725459730385" -> "1725459730.385" @param timeStamp the timestamp of the log message @return the timestamp formatted as string with millisecond precision
language: java
file_path: core/spring-boot/src/main/java/org/springframework/boot/logging/logback/GraylogExtendedLogFormatStructuredLogFormatter.java
line_number: 127
parameters: ["members", "extractor"]
return_type: void
has_type_hints: true
complexity: 1
quality_score: 6.08
repo_name: spring-projects/spring-boot
repo_stars: 79428
docstring_style: javadoc
is_async: false

function_name: describeClassicGroups
function_code:

    DescribeClassicGroupsResult describeClassicGroups(Collection<String> groupIds, DescribeClassicGroupsOptions options);

documentation: Describe some classic groups in the cluster. @param groupIds The IDs of the groups to describe. @param options The options to use when describing the groups. @return The DescribeClassicGroupsResult.
language: java
file_path: clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
line_number: 2067
parameters: ["groupIds", "options"]
return_type: DescribeClassicGroupsResult
has_type_hints: true
complexity: 1
quality_score: 6.48
repo_name: apache/kafka
repo_stars: 31560
docstring_style: javadoc
is_async: false

function_name: preferredReadReplica
function_code:

    public static Optional<Integer> preferredReadReplica(FetchResponseData.PartitionData partitionResponse) {
        return partitionResponse.preferredReadReplica() == INVALID_PREFERRED_REPLICA_ID
                ? Optional.empty()
                : Optional.of(partitionResponse.preferredReadReplica());
    }

documentation: Convenience method to find the size of a response. @param version The version of the response to use. @param partIterator The partition iterator. @return The response size in bytes.
language: java
file_path: clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java
line_number: 188
parameters: ["partitionResponse"]
has_type_hints: true
complexity: 2
quality_score: 8
repo_name: apache/kafka
repo_stars: 31560
docstring_style: javadoc
is_async: false

function_name: createDefaultBeanWiringInfoResolver
function_code:

    protected @Nullable BeanWiringInfoResolver createDefaultBeanWiringInfoResolver() {
        return new ClassNameBeanWiringInfoResolver();
    }

documentation: Create the default BeanWiringInfoResolver to be used if none was specified explicitly. <p>The default implementation builds a {@link ClassNameBeanWiringInfoResolver}. @return the default BeanWiringInfoResolver (never {@code null})
language: java
file_path: spring-beans/src/main/java/org/springframework/beans/factory/wiring/BeanConfigurerSupport.java
line_number: 93
parameters: []
return_type: BeanWiringInfoResolver
has_type_hints: true
complexity: 1
quality_score: 6
repo_name: spring-projects/spring-framework
repo_stars: 59386
docstring_style: javadoc
is_async: false

function_name: _check_for_bom
function_code:

    def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]:
        """
        Checks whether the file begins with the BOM character.
        If it does, remove it. In addition, if there is quoting
        in the field subsequent to the BOM, remove it as well
        because it technically takes place at the beginning of
        the name, not the middle of it.
        """
        # first_row will be a list, so we need to check
        # that that list is not empty before proceeding.
        if not first_row:
            return first_row

        # The first element of this row is the one that could have the
        # BOM that we want to remove. Check that the first element is a
        # string before proceeding.
        if not isinstance(first_row[0], str):
            return first_row

        # Check that the string is not empty, as that would
        # obviously not have a BOM at the start of it.
        if not first_row[0]:
            return first_row

        # Since the string is non-empty, check that it does
        # in fact begin with a BOM.
        first_elt = first_row[0][0]
        if first_elt != _BOM:
            return first_row

        first_row_bom = first_row[0]
        new_row: str

        if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar:
            start = 2
            quote = first_row_bom[1]
            end = first_row_bom[2:].index(quote) + 2

            # Extract the data between the quotation marks
            new_row = first_row_bom[start:end]

            # Extract any remaining data after the second
            # quotation mark.
            if len(first_row_bom) > end + 1:
                new_row += first_row_bom[end + 1 :]

        else:
            # No quotation so just remove BOM from first element
            new_row = first_row_bom[1:]

        new_row_list: list[Scalar] = [new_row]
        return new_row_list + first_row[1:]

language: python
file_path: pandas/io/parsers/python_parser.py
line_number: 824
parameters: ["self", "first_row"]
return_type: list[Scalar]
has_type_hints: true
complexity: 9
quality_score: 6
repo_name: pandas-dev/pandas
repo_stars: 47362
docstring_style: unknown
is_async: false

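The effect is easiest to see through `pandas.read_csv` with the pure-Python engine, which routes header parsing through this method. A small sketch; the expected output assumes current pandas behavior:

    import io
    import pandas as pd

    # First header field starts with a UTF-8 BOM and is quoted -- exactly
    # the case _check_for_bom handles.
    raw = '\ufeff"col a",col_b\n1,2\n'
    df = pd.read_csv(io.StringIO(raw), engine="python")
    print(df.columns.tolist())  # expected: ['col a', 'col_b']
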
function_name: get
function_code:

    def get(self: Self, key: Key) -> Value | None:
        """
        Retrieve a value from the cache.

        Args:
            key (Key): The key to look up.

        Returns:
            Value | None: The cached value if present, else None.
        """

language: python
file_path: torch/_inductor/cache.py
line_number: 43
parameters: ["self", "key"]
return_type: Value | None
has_type_hints: true
complexity: 1
quality_score: 6.56
repo_name: pytorch/pytorch
repo_stars: 96034
docstring_style: google
is_async: false

function_name: table_exists
function_code:

    def table_exists(self, table: str) -> bool:
        """
        Check if a table exists in Cassandra.

        :param table: Target Cassandra table.
            Use dot notation to target a specific keyspace.
        """
        keyspace = self.keyspace
        if "." in table:
            keyspace, table = table.split(".", 1)
        cluster_metadata = self.get_conn().cluster.metadata
        return keyspace in cluster_metadata.keyspaces and table in cluster_metadata.keyspaces[keyspace].tables

language: python
file_path: providers/apache/cassandra/src/airflow/providers/apache/cassandra/hooks/cassandra.py
line_number: 176
parameters: ["self", "table"]
return_type: bool
has_type_hints: true
complexity: 3
quality_score: 6.72
repo_name: apache/airflow
repo_stars: 43597
docstring_style: sphinx
is_async: false

function_name: insertCaptureThisForNodeIfNeeded
function_code:

    function insertCaptureThisForNodeIfNeeded(statements: Statement[], node: Node): boolean {
        if (hierarchyFacts & HierarchyFacts.CapturedLexicalThis && node.kind !== SyntaxKind.ArrowFunction) {
            insertCaptureThisForNode(statements, node, factory.createThis());
            return true;
        }
        return false;
    }

documentation: Adds a statement to capture the `this` of a function declaration if it is needed. NOTE: This must be executed *after* the subtree has been visited. @param statements The statements for the new function body. @param node A node.
language: typescript
file_path: src/compiler/transformers/es2015.ts
line_number: 2151
parameters: ["statements", "node"]
has_type_hints: true
complexity: 3
quality_score: 6.88
repo_name: microsoft/TypeScript
repo_stars: 107154
docstring_style: jsdoc
is_async: false

function_name: immutableEnumMap
function_code:

    public static <K extends Enum<K>, V> ImmutableMap<K, V> immutableEnumMap(
        Map<K, ? extends V> map) {
      if (map instanceof ImmutableEnumMap) {
        @SuppressWarnings("unchecked") // safe covariant cast
        ImmutableEnumMap<K, V> result = (ImmutableEnumMap<K, V>) map;
        return result;
      }
      Iterator<? extends Entry<K, ? extends V>> entryItr = map.entrySet().iterator();
      if (!entryItr.hasNext()) {
        return ImmutableMap.of();
      }
      Entry<K, ? extends V> entry1 = entryItr.next();
      K key1 = entry1.getKey();
      V value1 = entry1.getValue();
      checkEntryNotNull(key1, value1);
      // Do something that works for j2cl, where we can't call getDeclaredClass():
      EnumMap<K, V> enumMap = new EnumMap<>(singletonMap(key1, value1));
      while (entryItr.hasNext()) {
        Entry<K, ? extends V> entry = entryItr.next();
        K key = entry.getKey();
        V value = entry.getValue();
        checkEntryNotNull(key, value);
        enumMap.put(key, value);
      }
      return ImmutableEnumMap.asImmutable(enumMap);
    }

documentation: Returns an immutable map instance containing the given entries. Internally, the returned map will be backed by an {@link EnumMap}. <p>The iteration order of the returned map follows the enum's iteration order, not the order in which the elements appear in the given map. @param map the map to make an immutable copy of @return an immutable map containing those entries @since 14.0
language: java
file_path: android/guava/src/com/google/common/collect/Maps.java
line_number: 127
parameters: ["map"]
has_type_hints: true
complexity: 4
quality_score: 8.08
repo_name: google/guava
repo_stars: 51352
docstring_style: javadoc
is_async: false

function_name: sensor
function_code:

    public Sensor sensor(String name) {
        return this.sensor(name, Sensor.RecordingLevel.INFO);
    }

documentation: Get or create a sensor with the given unique name and no parent sensors. This uses a default recording level of INFO. @param name The sensor name @return The sensor
language: java
file_path: clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java
line_number: 325
parameters: ["name"]
return_type: Sensor
has_type_hints: true
complexity: 1
quality_score: 6.96
repo_name: apache/kafka
repo_stars: 31560
docstring_style: javadoc
is_async: false

function_name: reverse
function_code:

    public static void reverse(TDigestIntArray order, int offset, int length) {
        for (int i = 0; i < length / 2; i++) {
            int t = order.get(offset + i);
            order.set(offset + i, order.get(offset + length - i - 1));
            order.set(offset + length - i - 1, t);
        }
    }

documentation: Reverses part of an array. @param order The array containing the data to reverse. @param offset Where to start reversing. @param length How many elements to reverse
language: java
file_path: libs/tdigest/src/main/java/org/elasticsearch/tdigest/Sort.java
line_number: 190
parameters: ["order", "offset", "length"]
return_type: void
has_type_hints: true
complexity: 2
quality_score: 7.04
repo_name: elastic/elasticsearch
repo_stars: 75680
docstring_style: javadoc
is_async: false

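For readers more comfortable in Python, a minimal sketch of the same partial in-place reversal over a plain list (TDigestIntArray's get/set become index operations):

    def reverse(order, offset, length):
        # Swap symmetric pairs within order[offset : offset + length].
        for i in range(length // 2):
            j = offset + length - i - 1
            order[offset + i], order[j] = order[j], order[offset + i]

    data = [0, 1, 2, 3, 4, 5]
    reverse(data, 1, 4)   # reverse 4 elements starting at index 1
    print(data)           # [0, 4, 3, 2, 1, 5]
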
function_name: withPrefix
function_code:

    default ConfigurationPropertySource withPrefix(@Nullable String prefix) {
        return (StringUtils.hasText(prefix)) ? new PrefixedConfigurationPropertySource(this, prefix) : this;
    }

documentation: Return a variant of this source that supports a prefix. @param prefix the prefix for properties in the source @return a {@link ConfigurationPropertySource} instance supporting a prefix @since 2.5.0
language: java
file_path: core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertySource.java
line_number: 86
parameters: ["prefix"]
return_type: ConfigurationPropertySource
has_type_hints: true
complexity: 2
quality_score: 7.84
repo_name: spring-projects/spring-boot
repo_stars: 79428
docstring_style: javadoc
is_async: false

function_name: _lstsq
function_code:

    def _lstsq(X, y, indices, fit_intercept):
        """Least Squares Estimator for TheilSenRegressor class.

        This function calculates the least squares method on a subset of rows of X
        and y defined by the indices array. Optionally, an intercept column is
        added if intercept is set to true.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Design matrix, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : ndarray of shape (n_samples,)
            Target vector, where `n_samples` is the number of samples.

        indices : ndarray of shape (n_subpopulation, n_subsamples)
            Indices of all subsamples with respect to the chosen subpopulation.

        fit_intercept : bool
            Fit intercept or not.

        Returns
        -------
        weights : ndarray of shape (n_subpopulation, n_features + intercept)
            Solution matrix of n_subpopulation solved least square problems.
        """
        fit_intercept = int(fit_intercept)
        n_features = X.shape[1] + fit_intercept
        n_subsamples = indices.shape[1]
        weights = np.empty((indices.shape[0], n_features))
        X_subpopulation = np.ones((n_subsamples, n_features))
        # gelss need to pad y_subpopulation to be of the max dim of X_subpopulation
        y_subpopulation = np.zeros((max(n_subsamples, n_features)))
        (lstsq,) = get_lapack_funcs(("gelss",), (X_subpopulation, y_subpopulation))

        for index, subset in enumerate(indices):
            X_subpopulation[:, fit_intercept:] = X[subset, :]
            y_subpopulation[:n_subsamples] = y[subset]
            weights[index] = lstsq(X_subpopulation, y_subpopulation)[1][:n_features]
        return weights

language: python
file_path: sklearn/linear_model/_theil_sen.py
line_number: 163
parameters: ["X", "y", "indices", "fit_intercept"]
has_type_hints: false
complexity: 2
quality_score: 6.08
repo_name: scikit-learn/scikit-learn
repo_stars: 64340
docstring_style: numpy
is_async: false

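To see what each row of the returned `weights` holds, here is a hedged, self-contained sketch that mimics `_lstsq` with `np.linalg.lstsq` in place of the raw LAPACK `gelss` binding (assumed numerically equivalent for this purpose):

    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.normal(size=(20, 2))
    y = X @ np.array([1.5, -2.0]) + 0.5  # noise-free, intercept 0.5

    # Two subsamples of 3 rows each, as TheilSenRegressor would draw them.
    indices = np.array([[0, 1, 2], [3, 4, 5]])

    weights = np.empty((indices.shape[0], X.shape[1] + 1))  # +1 for intercept
    for k, subset in enumerate(indices):
        X_sub = np.column_stack([np.ones(len(subset)), X[subset]])
        weights[k] = np.linalg.lstsq(X_sub, y[subset], rcond=None)[0]

    print(weights)  # each row approximately [0.5, 1.5, -2.0]
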
function_name: supportsType
function_code:

    default boolean supportsType(Class<?> type) {
        Class<?> objectType = getObjectType();
        return (objectType != null && type.isAssignableFrom(objectType));
    }

documentation: Determine whether this factory supports the requested type. <p>By default, this supports the primary type exposed by the factory, as indicated by {@link #getObjectType()}. Specific factories may support additional types for dependency injection. @param type the requested type @return {@code true} if {@link #getObject(Class)} is able to return a corresponding instance, {@code false} otherwise @since 7.0 @see #getObject(Class) @see #getObjectType()
language: java
file_path: spring-beans/src/main/java/org/springframework/beans/factory/SmartFactoryBean.java
line_number: 85
parameters: ["type"]
has_type_hints: true
complexity: 2
quality_score: 7.36
repo_name: spring-projects/spring-framework
repo_stars: 59386
docstring_style: javadoc
is_async: false

function_name: flatten
function_code:

    private void flatten(@Nullable String prefix, Map<String, Object> result, Map<String, Object> map) {
        String namePrefix = (prefix != null) ? prefix + "." : "";
        map.forEach((key, value) -> extract(namePrefix + key, result, value));
    }

documentation: Flatten the map keys using period separator. @param map the map that should be flattened @return the flattened map
language: java
file_path: core/spring-boot/src/main/java/org/springframework/boot/support/SpringApplicationJsonEnvironmentPostProcessor.java
line_number: 125
parameters: ["prefix", "result", "map"]
return_type: void
has_type_hints: true
complexity: 2
quality_score: 8.16
repo_name: spring-projects/spring-boot
repo_stars: 79428
docstring_style: javadoc
is_async: false

function_name: _has_same_id_matched_objs
function_code:

    def _has_same_id_matched_objs(frame: DynamoFrameType, cache_entry: Any) -> bool:
        """
        Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones
        in frame.f_locals.
        """
        if not cache_entry:
            return False

        for (
            local_name,
            weakref_from_cache_entry,
        ) in cache_entry.guard_manager.id_matched_objs.items():
            if weakref_from_cache_entry() is not None:
                weakref_from_frame = _get_weakref_from_f_locals(frame, local_name)
                if weakref_from_frame is not weakref_from_cache_entry:
                    return False

        # Also covers the case where no ID_MATCH objects are saved in frame.f_locals
        return True

language: python
file_path: torch/_dynamo/cache_size.py
line_number: 114
parameters: ["frame", "cache_entry"]
return_type: bool
has_type_hints: true
complexity: 5
quality_score: 6
repo_name: pytorch/pytorch
repo_stars: 96034
docstring_style: unknown
is_async: false

function_name: containsTypeVariables
function_code:

    public static boolean containsTypeVariables(final Type type) {
        if (type instanceof TypeVariable<?>) {
            return true;
        }
        if (type instanceof Class<?>) {
            return ((Class<?>) type).getTypeParameters().length > 0;
        }
        if (type instanceof ParameterizedType) {
            for (final Type arg : ((ParameterizedType) type).getActualTypeArguments()) {
                if (containsTypeVariables(arg)) {
                    return true;
                }
            }
            return false;
        }
        if (type instanceof WildcardType) {
            final WildcardType wild = (WildcardType) type;
            return containsTypeVariables(getImplicitLowerBounds(wild)[0])
                    || containsTypeVariables(getImplicitUpperBounds(wild)[0]);
        }
        if (type instanceof GenericArrayType) {
            return containsTypeVariables(((GenericArrayType) type).getGenericComponentType());
        }
        return false;
    }

documentation: Tests, recursively, whether any of the type parameters associated with {@code type} are bound to variables. @param type The type to check for type variables. @return Whether any of the type parameters associated with {@code type} are bound to variables. @since 3.2
language: java
file_path: src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
line_number: 373
parameters: ["type"]
has_type_hints: true
complexity: 8
quality_score: 8.24
repo_name: apache/commons-lang
repo_stars: 2896
docstring_style: javadoc
is_async: false

function_name: isin
function_code:

    def isin(self, values) -> Series:
        """
        Whether elements in Series are contained in `values`.

        Return a boolean Series showing whether each element in the Series
        matches an element in the passed sequence of `values` exactly.

        Parameters
        ----------
        values : set or list-like
            The sequence of values to test. Passing in a single string will
            raise a ``TypeError``. Instead, turn a single string into a
            list of one element.

        Returns
        -------
        Series
            Series of booleans indicating if each element is in values.

        Raises
        ------
        TypeError
          * If `values` is a string

        See Also
        --------
        DataFrame.isin : Equivalent method on DataFrame.

        Examples
        --------
        >>> s = pd.Series(
        ...     ["llama", "cow", "llama", "beetle", "llama", "hippo"], name="animal"
        ... )
        >>> s.isin(["cow", "llama"])
        0     True
        1     True
        2     True
        3    False
        4     True
        5    False
        Name: animal, dtype: bool

        To invert the boolean values, use the ``~`` operator:

        >>> ~s.isin(["cow", "llama"])
        0    False
        1    False
        2    False
        3     True
        4    False
        5     True
        Name: animal, dtype: bool

        Passing a single string as ``s.isin('llama')`` will raise an error. Use
        a list of one element instead:

        >>> s.isin(["llama"])
        0     True
        1    False
        2     True
        3    False
        4     True
        5    False
        Name: animal, dtype: bool

        Strings and integers are distinct and are therefore not comparable:

        >>> pd.Series([1]).isin(["1"])
        0    False
        dtype: bool
        >>> pd.Series([1.1]).isin(["1.1"])
        0    False
        dtype: bool
        """
        result = algorithms.isin(self._values, values)
        return self._constructor(result, index=self.index, copy=False).__finalize__(
            self, method="isin"
        )

language: python
file_path: pandas/core/series.py
line_number: 5883
parameters: ["self", "values"]
return_type: Series
has_type_hints: true
complexity: 1
quality_score: 7.2
repo_name: pandas-dev/pandas
repo_stars: 47362
docstring_style: numpy
is_async: false

function_name: find_common_type
function_code:

    def find_common_type(types):
        """
        Find a common data type among the given dtypes.

        Parameters
        ----------
        types : list of dtypes

        Returns
        -------
        pandas extension or numpy dtype

        See Also
        --------
        numpy.find_common_type
        """
        if not types:
            raise ValueError("no types given")

        first = types[0]

        # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
        # => object
        if lib.dtypes_all_equal(list(types)):
            return first

        # get unique types (dict.fromkeys is used as order-preserving set())
        types = list(dict.fromkeys(types).keys())

        if any(isinstance(t, ExtensionDtype) for t in types):
            for t in types:
                if isinstance(t, ExtensionDtype):
                    res = t._get_common_dtype(types)
                    if res is not None:
                        return res
            return np.dtype("object")

        # take lowest unit
        if all(lib.is_np_dtype(t, "M") for t in types):
            return np.dtype(max(types))
        if all(lib.is_np_dtype(t, "m") for t in types):
            return np.dtype(max(types))

        # don't mix bool / int or float or complex
        # this is different from numpy, which casts bool with float/int as int
        has_bools = any(t.kind == "b" for t in types)
        if has_bools:
            for t in types:
                if t.kind in "iufc":
                    return np.dtype("object")

        return np_find_common_type(*types)

language: python
file_path: pandas/core/dtypes/cast.py
line_number: 1307
parameters: ["types"]
has_type_hints: false
complexity: 12
quality_score: 6.24
repo_name: pandas-dev/pandas
repo_stars: 47362
docstring_style: numpy
is_async: false

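A short demonstration; note the import path is pandas-internal and may move between versions:

    import numpy as np
    from pandas.core.dtypes.cast import find_common_type  # private API

    print(find_common_type([np.dtype("int64"), np.dtype("float64")]))  # float64
    print(find_common_type([np.dtype("bool"), np.dtype("int64")]))     # object (pandas rule)
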
function_name: notNull
function_code:

    @Deprecated
    public static <T> T notNull(final T object) {
        return notNull(object, DEFAULT_IS_NULL_EX_MESSAGE);
    }

documentation: Validate that the specified argument is not {@code null}; otherwise throwing an exception. <pre>Validate.notNull(myObject, "The object must not be null");</pre> <p>The message of the exception is &quot;The validated object is null&quot;. @param <T> the object type. @param object the object to check. @return the validated object (never {@code null} for method chaining). @throws NullPointerException if the object is {@code null}. @see #notNull(Object, String, Object...) @deprecated Use {@link Objects#requireNonNull(Object)}.
language: java
file_path: src/main/java/org/apache/commons/lang3/Validate.java
line_number: 1041
parameters: ["object"]
return_type: T
has_type_hints: true
complexity: 1
quality_score: 6.32
repo_name: apache/commons-lang
repo_stars: 2896
docstring_style: javadoc
is_async: false

function_name: setupInspectorHooks
function_code:

    function setupInspectorHooks() {
      // If Debugger.setAsyncCallStackDepth is sent during bootstrap,
      // we cannot immediately call into JS to enable the hooks, which could
      // interrupt the JS execution of bootstrap. So instead we save the
      // notification in the inspector agent if it's sent in the middle of
      // bootstrap, and process the notification later here.
      if (internalBinding('config').hasInspector) {
        const {
          enable,
          disable,
        } = require('internal/inspector_async_hook');
        internalBinding('inspector').registerAsyncHook(enable, disable);
      }
    }

documentation: Patch the process object with legacy properties and normalizations. Replace `process.argv[0]` with `process.execPath`, preserving the original `argv[0]` value as `process.argv0`. Replace `process.argv[1]` with the resolved absolute file path of the entry point, if found. @param {boolean} expandArgv1 - Whether to replace `process.argv[1]` with the resolved absolute file path of the main entry point. @returns {string}
language: javascript
file_path: lib/internal/process/pre_execution.js
line_number: 490
parameters: []
has_type_hints: false
complexity: 2
quality_score: 6.96
repo_name: nodejs/node
repo_stars: 114839
docstring_style: jsdoc
is_async: false

function_name: mirror_inductor_external_kernels
function_code:

    def mirror_inductor_external_kernels() -> None:
        """
        Copy external kernels into Inductor so they are importable.
        """
        cuda_is_disabled = not str2bool(os.getenv("USE_CUDA"))
        paths = [
            (
                CWD / "torch/_inductor/kernel/vendored_templates/cutedsl_grouped_gemm.py",
                CWD / "third_party/cutlass/examples/python/CuTeDSL/blackwell/grouped_gemm.py",
                True,
            ),
        ]
        for new_path, orig_path, allow_missing_if_cuda_is_disabled in paths:
            # Create the dirs involved in new_path if they don't exist
            if not new_path.exists():
                new_path.parent.mkdir(parents=True, exist_ok=True)

            # Add `__init__.py` for find_packages to see `new_path.parent` as a submodule
            (new_path.parent / "__init__.py").touch(exist_ok=True)

            # Copy the files from the orig location to the new location
            if orig_path.is_file():
                shutil.copyfile(orig_path, new_path)
                continue
            if orig_path.is_dir():
                if new_path.exists():
                    # copytree fails if the tree exists already, so remove it.
                    shutil.rmtree(new_path)
                shutil.copytree(orig_path, new_path)
                continue
            if (
                not orig_path.exists()
                and allow_missing_if_cuda_is_disabled
                and cuda_is_disabled
            ):
                continue
            raise RuntimeError(
                "Check the file paths in `mirror_inductor_external_kernels()`"
            )

Copy external kernels into Inductor so they are importable.
python
setup.py
633
[]
None
true
9
6.88
pytorch/pytorch
96,034
unknown
false
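The vendoring pattern above (create parent dirs, touch an __init__.py, then copy a file or tree) in stripped-down form; the function name here is hypothetical:

import shutil
from pathlib import Path

def vendor_path(src: Path, dst: Path) -> None:
    # Make sure the destination package directory exists and is importable.
    dst.parent.mkdir(parents=True, exist_ok=True)
    (dst.parent / "__init__.py").touch(exist_ok=True)
    if src.is_dir():
        # copytree fails if the destination exists, so clear any stale copy first.
        if dst.exists():
            shutil.rmtree(dst)
        shutil.copytree(src, dst)
    elif src.is_file():
        shutil.copyfile(src, dst)
    else:
        raise FileNotFoundError(src)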
_get_expected_output_shape
def _get_expected_output_shape(self) -> list: """Get the expected output shape from iteration variables. Iteration variables are shaped for broadcasting. For 2D outputs: - First var (e.g., y0) gets shape (1, N) - innermost dimension - Second var (e.g., x1) gets shape (M, 1) - outermost dimension The broadcast result is (M, N). """ # Collect variable lengths var_items = list(self.range_tree_nodes.items()) broadcast_vars = [] for var_sym, entry in var_items: try: length = int(entry.length) if hasattr(entry.length, "__int__") else None if length is not None: broadcast_vars.append(length) except (TypeError, ValueError): pass if len(broadcast_vars) <= 1: return broadcast_vars # For 2D case: variables are reshaped in reverse order # First var is innermost (last dim), second var is outermost (first dim) # So output shape is [second_var_length, first_var_length, ...] return list(reversed(broadcast_vars))
Get the expected output shape from iteration variables. Iteration variables are shaped for broadcasting. For 2D outputs: - First var (e.g., y0) gets shape (1, N) - innermost dimension - Second var (e.g., x1) gets shape (M, 1) - outermost dimension The broadcast result is (M, N).
python
torch/_inductor/codegen/pallas.py
1,033
[ "self" ]
list
true
5
6
pytorch/pytorch
96,034
unknown
false
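The broadcasting layout described in _get_expected_output_shape can be verified directly with NumPy: a (1, N) innermost variable and an (M, 1) outermost variable broadcast to (M, N), which is why the collected lengths are reversed.

import numpy as np

M, N = 4, 8
y0 = np.zeros((1, N))  # first var: innermost dimension
x1 = np.zeros((M, 1))  # second var: outermost dimension
assert np.broadcast_shapes(y0.shape, x1.shape) == (M, N)

# Mirroring the method: lengths collected in declaration order, then reversed.
assert list(reversed([N, M])) == [M, N]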
getElementsAnnotatedOrMetaAnnotatedWith
List<Element> getElementsAnnotatedOrMetaAnnotatedWith(Element element, TypeElement annotationType) { LinkedList<Element> stack = new LinkedList<>(); stack.push(element); collectElementsAnnotatedOrMetaAnnotatedWith(annotationType, stack); stack.removeFirst(); return Collections.unmodifiableList(stack); }
Collect the annotations that are annotated or meta-annotated with the specified {@link TypeElement annotation}. @param element the element to inspect @param annotationType the annotation to discover @return the annotations that are annotated or meta-annotated with this annotation
java
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/MetadataGenerationEnvironment.java
287
[ "element", "annotationType" ]
true
1
6.08
spring-projects/spring-boot
79,428
javadoc
false
repeat
def repeat(self, repeats, axis: None = None) -> Self: """ Repeat elements of an Index. Returns a new Index where each element of the current Index is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Index. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- Index Newly created Index with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> idx = pd.Index(["a", "b", "c"]) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name)
Repeat elements of an Index. Returns a new Index where each element of the current Index is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Index. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- Index Newly created Index with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> idx = pd.Index(["a", "b", "c"]) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')
python
pandas/core/indexes/base.py
1,329
[ "self", "repeats", "axis" ]
Self
true
1
7.28
pandas-dev/pandas
47,362
numpy
false
containsElements
private boolean containsElements(final Collection<?> coll) { if (coll == null || coll.isEmpty()) { return false; } return coll.stream().anyMatch(Objects::nonNull); }
Learn whether the specified Collection contains non-null elements. @param coll to check @return {@code true} if some Object was found, {@code false} otherwise.
java
src/main/java/org/apache/commons/lang3/text/ExtendedMessageFormat.java
250
[ "coll" ]
true
3
8.24
apache/commons-lang
2,896
javadoc
false
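For comparison, the null-safe any-non-null check in containsElements is a one-liner in Python:

def contains_elements(coll) -> bool:
    # None or empty collections contain no elements.
    return bool(coll) and any(x is not None for x in coll)

assert contains_elements([None, 1]) is True
assert contains_elements([None]) is False
assert contains_elements(None) is False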
_find_executor_class_name
def _find_executor_class_name() -> str | None: """Inspect the call stack looking for any executor classes and returning the first found.""" stack = inspect.stack() # Fetch class objects on all frames, looking for one containing an executor (since it # will inherit from BaseExecutor) for frame in stack: classes = [] for name, obj in frame[0].f_globals.items(): if inspect.isclass(obj): classes.append(name) if "BaseExecutor" in classes: return classes[-1] return None
Inspect the call stack looking for any executor classes and returning the first found.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/base_aws.py
553
[]
str | None
true
5
7.04
apache/airflow
43,597
unknown
false
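A minimal, self-contained demonstration of the frame-inspection technique used by _find_executor_class_name — walking the call stack and reading class names out of each frame's globals (names here are hypothetical):

import inspect

def find_global_class_names() -> list:
    names = []
    for frame_info in inspect.stack():
        for name, obj in frame_info.frame.f_globals.items():
            if inspect.isclass(obj):
                names.append(name)
    return names

class Marker:  # any class defined in this module's globals
    pass

assert "Marker" in find_global_class_names()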
toStringOnOff
public static String toStringOnOff(final boolean bool) { return toString(bool, ON, OFF); }
Converts a boolean to a String returning {@code 'on'} or {@code 'off'}. <pre> BooleanUtils.toStringOnOff(true) = "on" BooleanUtils.toStringOnOff(false) = "off" </pre> @param bool the Boolean to check @return {@code 'on'}, {@code 'off'}, or {@code null}
java
src/main/java/org/apache/commons/lang3/BooleanUtils.java
1,056
[ "bool" ]
String
true
1
6.48
apache/commons-lang
2,896
javadoc
false
print_value_stack
def print_value_stack( self, *, file: Optional[TextIO] = None, stacklevel: int = 0 ) -> None: """ Print the current Python value stack. Note that this is NOT the same as the traceback; use print_bt() to print that. Note that at stacklevel=0, this will typically be empty, as comptime cannot currently be used in an expression context where there would be intermediates on the stack. If you would find this useful, please file a bug at https://github.com/pytorch/pytorch/ NB: Stack grows downwards in our print """ tx = self.__get_tx(stacklevel) for s in tx.stack: print(f"- {s.debug_repr()}", file=file)
Print the current Python value stack. Note that this is NOT the same as the traceback; use print_bt() to print that. Note that at stacklevel=0, this will typically be empty, as comptime cannot currently be used in an expression context where there would be intermediates on the stack. If you would find this useful, please file a bug at https://github.com/pytorch/pytorch/ NB: Stack grows downwards in our print
python
torch/_dynamo/comptime.py
258
[ "self", "file", "stacklevel" ]
None
true
2
6.88
pytorch/pytorch
96,034
unknown
false
flush
@SuppressWarnings("IdentifierName") // See Closeables.close public static void flush(Flushable flushable, boolean swallowIOException) throws IOException { try { flushable.flush(); } catch (IOException e) { if (swallowIOException) { logger.log(Level.WARNING, "IOException thrown while flushing Flushable.", e); } else { throw e; } } }
Flush a {@link Flushable}, with control over whether an {@code IOException} may be thrown. <p>If {@code swallowIOException} is true, then we don't rethrow {@code IOException}, but merely log it. @param flushable the {@code Flushable} object to be flushed. @param swallowIOException if true, don't propagate IO exceptions thrown by the {@code flush} method @throws IOException if {@code swallowIOException} is false and {@link Flushable#flush} throws an {@code IOException}. @see Closeables#close
java
android/guava/src/com/google/common/io/Flushables.java
51
[ "flushable", "swallowIOException" ]
void
true
3
6.56
google/guava
51,352
javadoc
false
revoke_by_stamped_headers
def revoke_by_stamped_headers(state, headers, terminate=False, signal=None, **kwargs): """Revoke task by header (or list of headers). Keyword Arguments: headers(dictionary): Dictionary that contains stamping scheme name as keys and stamps as values. If headers is a list, it will be converted to a dictionary. terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``). Sample headers input: {'mtask_id': [id1, id2, id3]} """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 signum = _signals.signum(signal or TERM_SIGNAME) if isinstance(headers, list): headers = {h.split('=')[0]: h.split('=')[1] for h in headers} for header, stamps in headers.items(): updated_stamps = maybe_list(worker_state.revoked_stamps.get(header) or []) + list(maybe_list(stamps)) worker_state.revoked_stamps[header] = updated_stamps if not terminate: return ok(f'headers {headers} flagged as revoked, but not terminated') active_requests = list(worker_state.active_requests) terminated_scheme_to_stamps_mapping = defaultdict(set) # Terminate all running tasks of matching headers # Go through all active requests, and check if one of the # requests has a stamped header that matches the given headers to revoke for req in active_requests: # Check stamps exist if hasattr(req, "stamps") and req.stamps: # if so, check if any stamps match a revoked stamp for expected_header_key, expected_header_value in headers.items(): if expected_header_key in req.stamps: expected_header_value = maybe_list(expected_header_value) actual_header = maybe_list(req.stamps[expected_header_key]) matching_stamps_for_request = set(actual_header) & set(expected_header_value) # Check any possible match regardless if the stamps are a sequence or not if matching_stamps_for_request: terminated_scheme_to_stamps_mapping[expected_header_key].update(matching_stamps_for_request) req.terminate(state.consumer.pool, signal=signum) if not terminated_scheme_to_stamps_mapping: return ok(f'headers {headers} were not terminated') return ok(f'headers {terminated_scheme_to_stamps_mapping} revoked')
Revoke task by header (or list of headers). Keyword Arguments: headers(dictionary): Dictionary that contains stamping scheme name as keys and stamps as values. If headers is a list, it will be converted to a dictionary. terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``). Sample headers input: {'mtask_id': [id1, id2, id3]}
python
celery/worker/control.py
160
[ "state", "headers", "terminate", "signal" ]
false
13
6.24
celery/celery
27,741
unknown
false
transformEnumMemberDeclarationValue
function transformEnumMemberDeclarationValue(member: EnumMember, constantValue: string | number | undefined): Expression { if (constantValue !== undefined) { return typeof constantValue === "string" ? factory.createStringLiteral(constantValue) : constantValue < 0 ? factory.createPrefixUnaryExpression(SyntaxKind.MinusToken, factory.createNumericLiteral(-constantValue)) : factory.createNumericLiteral(constantValue); } else { enableSubstitutionForNonQualifiedEnumMembers(); if (member.initializer) { return Debug.checkDefined(visitNode(member.initializer, visitor, isExpression)); } else { return factory.createVoidZero(); } } }
Transforms the value of an enum member. @param member The enum member node. @param constantValue The member's constant value, if it could be statically evaluated.
typescript
src/compiler/transformers/ts.ts
1,948
[ "member", "constantValue" ]
true
7
6.56
microsoft/TypeScript
107,154
jsdoc
false
newMetadataRequestBuilder
@Override public synchronized MetadataRequest.Builder newMetadataRequestBuilder() { if (subscription.hasPatternSubscription()) { // Consumer subscribed to client-side regex => request all topics to compute regex return MetadataRequest.Builder.allTopics(); } if (subscription.hasRe2JPatternSubscription() && transientTopics.isEmpty()) { // Consumer subscribed to broker-side regex and no need for transient topic names metadata => request topic IDs return MetadataRequest.Builder.forTopicIds(subscription.assignedTopicIds()); } // Subscription to explicit topic names or transient topics present. // Note that in the case of RE2J broker-side regex subscription, we may end up in this path // if there are transient topics. They are just needed temporarily (lifetime of offsets-related API calls), // so we'll request them to unblock their APIs, then go back to requesting assigned topic IDs as needed List<String> topics = new ArrayList<>(); topics.addAll(subscription.metadataTopics()); topics.addAll(transientTopics); return MetadataRequest.Builder.forTopicNames(topics, allowAutoTopicCreation); }
Constructs a metadata request builder for fetching cluster metadata for the topics the consumer needs. This will include: <ul> <li>topics the consumer is subscribed to using topic names (calls to subscribe with topic name list or client-side regex)</li> <li>topics the consumer is subscribed to using topic IDs (calls to subscribe with broker-side regex RE2J)</li> <li>topics involved in calls for fetching offsets (transient topics)</li> </ul> Note that this will generate a request for all topics in the cluster only when the consumer is subscribed to a client-side regex.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java
80
[]
true
4
6.56
apache/kafka
31,560
javadoc
false
compare
public static int compare(final boolean x, final boolean y) { if (x == y) { return 0; } return x ? 1 : -1; }
Compares two {@code boolean} values. This is the same functionality as provided in Java 7. @param x the first {@code boolean} to compare @param y the second {@code boolean} to compare @return the value {@code 0} if {@code x == y}; a value less than {@code 0} if {@code !x && y}; and a value greater than {@code 0} if {@code x && !y} @since 3.4
java
src/main/java/org/apache/commons/lang3/BooleanUtils.java
157
[ "x", "y" ]
true
3
8.08
apache/commons-lang
2,896
javadoc
false
equals
@Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null || getClass() != obj.getClass()) { return false; } return Objects.equals(this.text, ((PemContent) obj).text); }
Tests this {@link PemContent} for equality against another object by comparing the underlying PEM text. @param obj the object to compare to @return {@code true} if the given object is a {@code PemContent} with the same text
java
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemContent.java
90
[ "obj" ]
true
4
7.92
spring-projects/spring-boot
79,428
javadoc
false
parseMap
Map<String, Object> parseMap(@Nullable String json) throws JsonParseException;
Parse the specified JSON string into a Map. @param json the JSON to parse @return the parsed JSON as a map @throws JsonParseException if the JSON cannot be parsed
java
core/spring-boot/src/main/java/org/springframework/boot/json/JsonParser.java
42
[ "json" ]
true
1
6.32
spring-projects/spring-boot
79,428
javadoc
false
wait_for_state
def wait_for_state(self, instance_id: str, target_state: str, check_interval: float) -> None:
    """
    Wait for the EC2 instance until its state equals the target_state.

    :param instance_id: id of the AWS EC2 instance
    :param target_state: target state of instance
    :param check_interval: time in seconds that the job should wait in
        between each instance state check until operation is completed
    :return: None
    """
    instance_state = self.get_instance_state(instance_id=instance_id)
    while instance_state != target_state:
        time.sleep(check_interval)
        instance_state = self.get_instance_state(instance_id=instance_id)
        self.log.info(
            "instance state: %s. Same as target: %s", instance_state, instance_state == target_state
        )
Wait for the EC2 instance until its state equals the target_state. :param instance_id: id of the AWS EC2 instance :param target_state: target state of instance :param check_interval: time in seconds that the job should wait in between each instance state check until operation is completed :return: None
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/ec2.py
192
[ "self", "instance_id", "target_state", "check_interval" ]
None
true
2
7.76
apache/airflow
43,597
sphinx
false
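wait_for_state above is a standard poll-and-sleep loop. The same pattern in generic form, with hypothetical names:

import time
from typing import Callable

def wait_until(get_state: Callable[[], str], target: str, interval: float) -> None:
    # Poll until the observed state matches the target, sleeping between checks.
    state = get_state()
    while state != target:
        time.sleep(interval)
        state = get_state()

states = iter(["pending", "pending", "running"])
wait_until(lambda: next(states), "running", interval=0.0)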
getProperty
@Override public @Nullable Value getProperty(String name) { PropertyFile propertyFile = this.propertyFiles.get(name); return (propertyFile != null) ? propertyFile.getContent() : null; }
Return the value of the property with the given name, or {@code null} if no property file with that name exists. @param name the property name @return the property value or {@code null}
java
core/spring-boot/src/main/java/org/springframework/boot/env/ConfigTreePropertySource.java
125
[ "name" ]
Value
true
2
6.24
spring-projects/spring-boot
79,428
javadoc
false
startupTimes
ImmutableMap<Service, Long> startupTimes() { List<Entry<Service, Long>> loadTimes; monitor.enter(); try { loadTimes = Lists.newArrayListWithCapacity(startupTimers.size()); // N.B. There will only be an entry in the map if the service has started for (Entry<Service, Stopwatch> entry : startupTimers.entrySet()) { Service service = entry.getKey(); Stopwatch stopwatch = entry.getValue(); if (!stopwatch.isRunning() && !(service instanceof NoOpService)) { loadTimes.add(Maps.immutableEntry(service, stopwatch.elapsed(MILLISECONDS))); } } } finally { monitor.leave(); } sort(loadTimes, Ordering.natural().onResultOf(Entry::getValue)); return ImmutableMap.copyOf(loadTimes); }
Returns the startup times, in milliseconds, of services that have finished starting, ordered by startup duration. Services that are still starting and {@link NoOpService} instances are excluded.
java
android/guava/src/com/google/common/util/concurrent/ServiceManager.java
645
[]
true
3
7.2
google/guava
51,352
javadoc
false
validateGenerators
async function validateGenerators(generators: GeneratorConfig[]): Promise<void> { const binaryTarget = await getBinaryTargetForCurrentPlatform() for (const generator of generators) { if (generator.binaryTargets) { const binaryTargets = generator.binaryTargets && generator.binaryTargets.length > 0 ? generator.binaryTargets : [{ fromEnvVar: null, value: 'native' }] const resolvedBinaryTargets: string[] = binaryTargets .flatMap((object) => parseBinaryTargetsEnvValue(object)) .map((p) => (p === 'native' ? binaryTarget : p)) for (const resolvedBinaryTarget of resolvedBinaryTargets) { if (oldToNewBinaryTargetsMapping[resolvedBinaryTarget]) { throw new Error( `Binary target ${red(bold(resolvedBinaryTarget))} is deprecated. Please use ${green( bold(oldToNewBinaryTargetsMapping[resolvedBinaryTarget]), )} instead.`, ) } if (!knownBinaryTargets.includes(resolvedBinaryTarget as BinaryTarget)) { throw new Error( `Unknown binary target ${red(resolvedBinaryTarget)} in generator ${bold(generator.name)}. Possible binaryTargets: ${green(knownBinaryTargets.join(', '))}`, ) } } // Only show warning if resolvedBinaryTargets // is missing current platform if (!resolvedBinaryTargets.includes(binaryTarget)) { const originalBinaryTargetsConfig = getOriginalBinaryTargetsValue(generator.binaryTargets) console.log(`${yellow('Warning:')} Your current platform \`${bold( binaryTarget, )}\` is not included in your generator's \`binaryTargets\` configuration ${JSON.stringify( originalBinaryTargetsConfig, )}. To fix it, use this generator config in your ${bold('schema.prisma')}: ${green( printGeneratorConfig({ ...generator, binaryTargets: fixBinaryTargets(generator.binaryTargets, binaryTarget), }), )} ${gray( `Note, that by providing \`native\`, Prisma Client automatically resolves \`${binaryTarget}\`. Read more about deploying Prisma Client: ${underline( 'https://www.prisma.io/docs/reference/tools-and-interfaces/prisma-schema/generators', )}`, )}\n`) } } } }
Validates each generator's `binaryTargets` configuration: resolves `native` to the current platform, rejects deprecated or unknown binary targets with an error, and prints a warning (with a suggested schema config) when the current platform is missing from the configuration. @param generators The generator configs to validate
typescript
packages/internals/src/get-generators/getGenerators.ts
355
[ "generators" ]
true
8
6.32
prisma/prisma
44,834
jsdoc
true
pendingToString
@Override protected final @Nullable String pendingToString() { @RetainedLocalRef ImmutableCollection<? extends Future<?>> localFutures = futures; if (localFutures != null) { return "futures=" + localFutures; } return super.pendingToString(); }
Returns a string describing the pending state of this future: the input futures if they are still present, otherwise the superclass description.
java
android/guava/src/com/google/common/util/concurrent/AggregateFuture.java
97
[]
String
true
2
6.4
google/guava
51,352
javadoc
false
getAnnotations
public Annotation[] getAnnotations() { if (this.field != null) { Annotation[] fieldAnnotations = this.fieldAnnotations; if (fieldAnnotations == null) { fieldAnnotations = this.field.getAnnotations(); this.fieldAnnotations = fieldAnnotations; } return fieldAnnotations; } else { return obtainMethodParameter().getParameterAnnotations(); } }
Obtain the annotations associated with the wrapped field or method/constructor parameter.
java
spring-beans/src/main/java/org/springframework/beans/factory/InjectionPoint.java
121
[]
true
3
6.4
spring-projects/spring-framework
59,386
javadoc
false
areNeighbours
public static boolean areNeighbours(long origin, long destination) { // Make sure they're hexagon indexes if (H3Index.H3_get_mode(origin) != Constants.H3_CELL_MODE) { throw new IllegalArgumentException("Invalid cell: " + origin); } if (H3Index.H3_get_mode(destination) != Constants.H3_CELL_MODE) { throw new IllegalArgumentException("Invalid cell: " + destination); } // Hexagons cannot be neighbors with themselves if (origin == destination) { return false; } final int resolution = H3Index.H3_get_resolution(origin); // Only hexagons in the same resolution can be neighbors if (resolution != H3Index.H3_get_resolution(destination)) { return false; } // H3 Indexes that share the same parent are very likely to be neighbors // Child 0 is neighbor with all of its parent's 'offspring', the other // children are neighbors with 3 of the 7 children. So a simple comparison // of origin and destination parents and then a lookup table of the children // is a super-cheap way to possibly determine they are neighbors. if (resolution > 1) { long originParent = H3.h3ToParent(origin); long destinationParent = H3.h3ToParent(destination); if (originParent == destinationParent) { int originResDigit = H3Index.H3_get_index_digit(origin, resolution); int destinationResDigit = H3Index.H3_get_index_digit(destination, resolution); if (originResDigit == CoordIJK.Direction.CENTER_DIGIT.digit() || destinationResDigit == CoordIJK.Direction.CENTER_DIGIT.digit()) { return true; } if (originResDigit >= CoordIJK.Direction.INVALID_DIGIT.digit()) { // Prevent indexing off the end of the array below throw new IllegalArgumentException(""); } if ((originResDigit == CoordIJK.Direction.K_AXES_DIGIT.digit() || destinationResDigit == CoordIJK.Direction.K_AXES_DIGIT.digit()) && H3.isPentagon(originParent)) { // If these are invalid cells, fail rather than incorrectly // reporting neighbors. For pentagon cells that are actually // neighbors across the deleted subsequence, they will fail the // optimized check below, but they will be accepted by the // gridDisk check below that. throw new IllegalArgumentException("Undefined error checking for neighbors"); } // These sets are the relevant neighbors in the clockwise // and counter-clockwise if (NEIGHBORSETCLOCKWISE[originResDigit].digit() == destinationResDigit || NEIGHBORSETCOUNTERCLOCKWISE[originResDigit].digit() == destinationResDigit) { return true; } } } // Otherwise, we have to determine the neighbor relationship the "hard" way. for (int i = 0; i < 6; i++) { long neighbor = h3NeighborInDirection(origin, DIRECTIONS[i].digit()); if (neighbor != -1) { // -1 is an expected case when trying to traverse off of // pentagons. if (destination == neighbor) { return true; } } } return false; }
Returns whether or not the provided H3Indexes are neighbors. @param origin The origin H3 index. @param destination The destination H3 index. @return true if the indexes are neighbors, false otherwise
java
libs/h3/src/main/java/org/elasticsearch/h3/HexRing.java
591
[ "origin", "destination" ]
true
18
7.12
elastic/elasticsearch
75,680
javadoc
false
findDefaultHomeDir
private File findDefaultHomeDir() { String userDir = System.getProperty("user.dir"); return new File(StringUtils.hasLength(userDir) ? userDir : "."); }
Find the default home directory: the {@code user.dir} system property when set, otherwise the current directory. @return the default home directory
java
core/spring-boot/src/main/java/org/springframework/boot/system/ApplicationHome.java
151
[]
File
true
2
6.64
spring-projects/spring-boot
79,428
javadoc
false
readAsn1Object
public Asn1Object readAsn1Object() throws IOException { int tag = derInputStream.read(); if (tag == -1) { throw new IOException("Invalid DER: stream too short, missing tag"); } int length = getLength(); // getLength() can return any 32 bit integer, so ensure that a corrupted encoding won't // force us into allocating a very large array if (length > maxAsnObjectLength) { throw new IOException( "Invalid DER: size of ASN.1 object to be parsed appears to be larger than the size of the key file " + "itself." ); } byte[] value = new byte[length]; int n = derInputStream.read(value); if (n < length) { throw new IOException( "Invalid DER: stream too short, missing value. " + "Could only read " + n + " out of " + length + " bytes" ); } return new Asn1Object(tag, length, value); }
Reads the next ASN.1 object from the DER stream. @return the parsed {@link Asn1Object} @throws IOException if the stream is truncated, the tag is missing, or the encoded length exceeds the maximum allowed object size
java
libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java
81
[]
Asn1Object
true
4
6.24
elastic/elasticsearch
75,680
javadoc
false
parseExpectedTokenJSDoc
function parseExpectedTokenJSDoc(t: JSDocSyntaxKind): Node { const optional = parseOptionalTokenJSDoc(t); if (optional) return optional; Debug.assert(isKeywordOrPunctuation(t)); return createMissingNode(t, /*reportAtCurrentPosition*/ false, Diagnostics._0_expected, tokenToString(t)); }
Parses an expected JSDoc token: returns the token if present, otherwise records a {@code Diagnostics._0_expected} diagnostic and returns a missing node. @param t The expected token kind.
typescript
src/compiler/parser.ts
2,546
[ "t" ]
true
2
6.72
microsoft/TypeScript
107,154
jsdoc
false
dag_state
def dag_state(args, session: Session = NEW_SESSION) -> None: """ Return the state (and conf if exists) of a DagRun at the command line. >>> airflow dags state tutorial 2015-01-01T00:00:00.000000 running >>> airflow dags state a_dag_with_conf_passed 2015-01-01T00:00:00.000000 failed, {"name": "bob", "age": "42"} """ dag = DagModel.get_dagmodel(args.dag_id, session=session) if not dag: raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table") dr, _ = fetch_dag_run_from_run_id_or_logical_date_string( dag_id=dag.dag_id, value=args.logical_date_or_run_id, session=session, ) if not dr: print(None) elif dr.conf: print(f"{dr.state}, {json.dumps(dr.conf)}") else: print(dr.state)
Return the state (and conf if exists) of a DagRun at the command line. >>> airflow dags state tutorial 2015-01-01T00:00:00.000000 running >>> airflow dags state a_dag_with_conf_passed 2015-01-01T00:00:00.000000 failed, {"name": "bob", "age": "42"}
python
airflow-core/src/airflow/cli/commands/dag_command.py
280
[ "args", "session" ]
None
true
5
6.96
apache/airflow
43,597
unknown
false
doFindMatchingMethod
@SuppressWarnings("NullAway") // Dataflow analysis limitation protected @Nullable Method doFindMatchingMethod(@Nullable Object[] arguments) { TypeConverter converter = getTypeConverter(); if (converter != null) { String targetMethod = getTargetMethod(); Method matchingMethod = null; int argCount = arguments.length; Class<?> targetClass = getTargetClass(); Assert.state(targetClass != null, "No target class set"); Method[] candidates = ReflectionUtils.getAllDeclaredMethods(targetClass); int minTypeDiffWeight = Integer.MAX_VALUE; @Nullable Object[] argumentsToUse = null; for (Method candidate : candidates) { if (candidate.getName().equals(targetMethod)) { // Check if the inspected method has the correct number of parameters. int parameterCount = candidate.getParameterCount(); if (parameterCount == argCount) { Class<?>[] paramTypes = candidate.getParameterTypes(); @Nullable Object[] convertedArguments = new Object[argCount]; boolean match = true; for (int j = 0; j < argCount && match; j++) { // Verify that the supplied argument is assignable to the method parameter. try { convertedArguments[j] = converter.convertIfNecessary(arguments[j], paramTypes[j]); } catch (TypeMismatchException ex) { // Ignore -> simply doesn't match. match = false; } } if (match) { int typeDiffWeight = getTypeDifferenceWeight(paramTypes, convertedArguments); if (typeDiffWeight < minTypeDiffWeight) { minTypeDiffWeight = typeDiffWeight; matchingMethod = candidate; argumentsToUse = convertedArguments; } } } } } if (matchingMethod != null) { setArguments(argumentsToUse); return matchingMethod; } } return null; }
Actually find a method with matching parameter type, i.e. where each argument value is assignable to the corresponding parameter type. @param arguments the argument values to match against method parameters @return a matching method, or {@code null} if none
java
spring-beans/src/main/java/org/springframework/beans/support/ArgumentConvertingMethodInvoker.java
133
[ "arguments" ]
Method
true
10
8.24
spring-projects/spring-framework
59,386
javadoc
false
stream
public static Stream<Throwable> stream(final Throwable throwable) { // No point building a custom Iterable as it would keep track of visited elements to avoid infinite loops return getThrowableList(throwable).stream(); }
Streams causes of a Throwable. <p> A throwable without cause will return a stream containing one element - the input throwable. A throwable with one cause will return a stream containing two elements. - the input throwable and the cause throwable. A {@code null} throwable will return a stream of count zero. </p> <p> This method handles recursive cause chains that might otherwise cause infinite loops. The cause chain is processed until the end, or until the next item in the chain is already in the result. </p> @param throwable The Throwable to traverse. @return A new Stream of Throwable causes. @since 3.13.0
java
src/main/java/org/apache/commons/lang3/exception/ExceptionUtils.java
894
[ "throwable" ]
true
1
7.12
apache/commons-lang
2,896
javadoc
false
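The loop-protected cause traversal that ExceptionUtils.stream relies on maps naturally onto Python's __cause__ chain; a sketch that guards against recursive chains:

def cause_chain(exc):
    seen, chain = set(), []
    while exc is not None and id(exc) not in seen:
        seen.add(id(exc))
        chain.append(exc)
        exc = exc.__cause__
    return chain

try:
    try:
        raise ValueError("root")
    except ValueError as e:
        raise RuntimeError("wrapper") from e
except RuntimeError as err:
    assert [type(x) for x in cause_chain(err)] == [RuntimeError, ValueError]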
ones
def ones(shape, dtype=None, order='C', *, device=None, like=None): """ Return a new array of given shape and type, filled with ones. Parameters ---------- shape : int or sequence of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. order : {'C', 'F'}, optional, default: C Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 Returns ------- out : ndarray Array of ones with the given shape, dtype, and order. See Also -------- ones_like : Return an array of ones with shape and type of input. empty : Return a new uninitialized array. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Examples -------- >>> import numpy as np >>> np.ones(5) array([1., 1., 1., 1., 1.]) >>> np.ones((5,), dtype=np.int_) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) array([[1.], [1.]]) >>> s = (2,2) >>> np.ones(s) array([[1., 1.], [1., 1.]]) """ if like is not None: return _ones_with_like( like, shape, dtype=dtype, order=order, device=device ) a = empty(shape, dtype, order, device=device) multiarray.copyto(a, 1, casting='unsafe') return a
Return a new array of given shape and type, filled with ones. Parameters ---------- shape : int or sequence of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. dtype : data-type, optional The desired data-type for the array, e.g., `numpy.int8`. Default is `numpy.float64`. order : {'C', 'F'}, optional, default: C Whether to store multi-dimensional data in row-major (C-style) or column-major (Fortran-style) order in memory. device : str, optional The device on which to place the created array. Default: None. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 Returns ------- out : ndarray Array of ones with the given shape, dtype, and order. See Also -------- ones_like : Return an array of ones with shape and type of input. empty : Return a new uninitialized array. zeros : Return a new array setting values to zero. full : Return a new array of given shape filled with value. Examples -------- >>> import numpy as np >>> np.ones(5) array([1., 1., 1., 1., 1.]) >>> np.ones((5,), dtype=np.int_) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) array([[1.], [1.]]) >>> s = (2,2) >>> np.ones(s) array([[1., 1.], [1., 1.]])
python
numpy/_core/numeric.py
172
[ "shape", "dtype", "order", "device", "like" ]
false
2
7.6
numpy/numpy
31,054
numpy
false
lastIndexOf
public int lastIndexOf(final char ch) { return lastIndexOf(ch, size - 1); }
Searches the string builder to find the last reference to the specified char. @param ch the character to find @return the last index of the character, or -1 if not found
java
src/main/java/org/apache/commons/lang3/text/StrBuilder.java
2,312
[ "ch" ]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
registerPostProcessor
private static BeanDefinitionHolder registerPostProcessor( BeanDefinitionRegistry registry, RootBeanDefinition definition, String beanName) { definition.setRole(BeanDefinition.ROLE_INFRASTRUCTURE); registry.registerBeanDefinition(beanName, definition); return new BeanDefinitionHolder(definition, beanName); }
Register the given post-processor definition with the registry under the supplied bean name, marking it as an infrastructure bean. @param registry the registry to operate on @param definition the post-processor bean definition @param beanName the name to register the definition under @return a holder wrapping the registered bean definition
java
spring-context/src/main/java/org/springframework/context/annotation/AnnotationConfigUtils.java
207
[ "registry", "definition", "beanName" ]
BeanDefinitionHolder
true
1
6.24
spring-projects/spring-framework
59,386
javadoc
false
isClassLoaderAccepted
private static boolean isClassLoaderAccepted(ClassLoader classLoader) { for (ClassLoader acceptedLoader : acceptedClassLoaders) { if (isUnderneathClassLoader(classLoader, acceptedLoader)) { return true; } } return false; }
Check whether this CachedIntrospectionResults class is configured to accept the given ClassLoader. @param classLoader the ClassLoader to check @return whether the given ClassLoader is accepted @see #acceptClassLoader
java
spring-beans/src/main/java/org/springframework/beans/CachedIntrospectionResults.java
180
[ "classLoader" ]
true
2
7.28
spring-projects/spring-framework
59,386
javadoc
false
isLazyLookup
boolean isLazyLookup(RegisteredBean registeredBean) { AnnotatedElement ae = getAnnotatedElement(registeredBean); Lazy lazy = ae.getAnnotation(Lazy.class); return (lazy != null && lazy.value()); }
Determine whether the given bean's resource injection point is annotated with {@code @Lazy} and therefore requests a lazy lookup. @param registeredBean the registered bean @return whether a lazy lookup is requested
java
spring-context/src/main/java/org/springframework/context/annotation/ResourceElementResolver.java
144
[ "registeredBean" ]
true
2
7.28
spring-projects/spring-framework
59,386
javadoc
false
initials
public static String initials(final String str) { return initials(str, null); }
Extracts the initial characters from each word in the String. <p>All first characters after whitespace are returned as a new string. Their case is not changed.</p> <p>Whitespace is defined by {@link Character#isWhitespace(char)}. A {@code null} input String returns {@code null}.</p> <pre> WordUtils.initials(null) = null WordUtils.initials("") = "" WordUtils.initials("Ben John Lee") = "BJL" WordUtils.initials("Ben J.Lee") = "BJ" </pre> @param str the String to get initials from, may be null. @return String of initial letters, {@code null} if null String input. @see #initials(String,char[]) @since 2.2
java
src/main/java/org/apache/commons/lang3/text/WordUtils.java
230
[ "str" ]
String
true
1
6.48
apache/commons-lang
2,896
javadoc
false
addTriggerToScheduler
private boolean addTriggerToScheduler(Trigger trigger) throws SchedulerException { boolean triggerExists = (getScheduler().getTrigger(trigger.getKey()) != null); if (triggerExists && !this.overwriteExistingJobs) { return false; } // Check if the Trigger is aware of an associated JobDetail. JobDetail jobDetail = (JobDetail) trigger.getJobDataMap().remove("jobDetail"); if (triggerExists) { if (jobDetail != null && this.jobDetails != null && !this.jobDetails.contains(jobDetail) && addJobToScheduler(jobDetail)) { this.jobDetails.add(jobDetail); } try { getScheduler().rescheduleJob(trigger.getKey(), trigger); } catch (ObjectAlreadyExistsException ex) { if (logger.isDebugEnabled()) { logger.debug("Unexpectedly encountered existing trigger on rescheduling, assumably due to " + "cluster race condition: " + ex.getMessage() + " - can safely be ignored"); } } } else { try { if (jobDetail != null && this.jobDetails != null && !this.jobDetails.contains(jobDetail) && (this.overwriteExistingJobs || getScheduler().getJobDetail(jobDetail.getKey()) == null)) { getScheduler().scheduleJob(jobDetail, trigger); this.jobDetails.add(jobDetail); } else { getScheduler().scheduleJob(trigger); } } catch (ObjectAlreadyExistsException ex) { if (logger.isDebugEnabled()) { logger.debug("Unexpectedly encountered existing trigger on job scheduling, assumably due to " + "cluster race condition: " + ex.getMessage() + " - can safely be ignored"); } if (this.overwriteExistingJobs) { getScheduler().rescheduleJob(trigger.getKey(), trigger); } } } return true; }
Add the given trigger to the Scheduler, if it doesn't already exist. Overwrites the trigger in any case if "overwriteExistingJobs" is set. @param trigger the trigger to add @return {@code true} if the trigger was actually added, {@code false} if it already existed before @see #setOverwriteExistingJobs
java
spring-context-support/src/main/java/org/springframework/scheduling/quartz/SchedulerAccessor.java
291
[ "trigger" ]
true
18
6.72
spring-projects/spring-framework
59,386
javadoc
false
afterPropertiesSet
@Override public void afterPropertiesSet() { if (!isActive()) { refresh(); } }
Triggers {@link #refresh()} if not refreshed in the concrete context's constructor already.
java
spring-context/src/main/java/org/springframework/context/support/AbstractRefreshableConfigApplicationContext.java
149
[]
void
true
2
6.4
spring-projects/spring-framework
59,386
javadoc
false
nextTo
public String nextTo(String excluded) { if (excluded == null) { throw new NullPointerException("excluded == null"); } return nextToInternal(excluded).trim(); }
Returns the string from the current position up to, but not including, the first occurrence of any character in {@code excluded} (or the end of the input), with surrounding whitespace trimmed. @param excluded the characters that terminate the returned string @return the trimmed substring @throws NullPointerException if {@code excluded} is null
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONTokener.java
507
[ "excluded" ]
String
true
2
6.88
spring-projects/spring-boot
79,428
javadoc
false
getPropertyName
private String getPropertyName(@Nullable String path, String key) { if (!StringUtils.hasText(path)) { return key; } if (key.startsWith("[")) { return path + key; } return path + "." + key; }
Compose the full property name from the given path and key: the key alone when the path is empty, direct concatenation for indexed keys (starting with {@code [}), and a dot-separated join otherwise. @param path the parent path, may be {@code null} @param key the key to append @return the composed property name
java
core/spring-boot/src/main/java/org/springframework/boot/cloud/CloudFoundryVcapEnvironmentPostProcessor.java
222
[ "path", "key" ]
String
true
3
6.24
spring-projects/spring-boot
79,428
javadoc
false
getSemverFromPatchBranch
function getSemverFromPatchBranch(version: string) { // the branch name must match // number.number.x like 3.0.x or 2.29.x // as an exact match, no character before or after const regex = /^(\d+)\.(\d+)\.x$/ const match = regex.exec(version) if (match) { return { major: Number(match[1]), minor: Number(match[2]), } } return undefined }
Parses a patch branch name matching exactly `number.number.x` (for example `3.0.x` or `2.29.x`, with no character before or after) and returns its major and minor components, or `undefined` when the branch name does not match. @param version The branch name to parse
typescript
scripts/ci/publish.ts
441
[ "version" ]
false
2
6.64
prisma/prisma
44,834
jsdoc
false
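The same exact-match parse in Python for comparison — re.fullmatch enforces the "no character before or after" requirement noted in the comment:

import re

def semver_from_patch_branch(version: str):
    match = re.fullmatch(r"(\d+)\.(\d+)\.x", version)
    if match:
        return {"major": int(match.group(1)), "minor": int(match.group(2))}
    return None

assert semver_from_patch_branch("3.0.x") == {"major": 3, "minor": 0}
assert semver_from_patch_branch("v3.0.x") is None  # leading character, no match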
getImportedMessage
private CharSequence getImportedMessage(Set<ConfigDataResolutionResult> results) { if (results.isEmpty()) { return "Nothing imported"; } StringBuilder message = new StringBuilder(); message.append("Imported " + results.size() + " resource" + ((results.size() != 1) ? "s " : " ")); message.append(results.stream().map(ConfigDataResolutionResult::getResource).toList()); return message; }
Build a human-readable message describing the resources imported by the given resolution results, or {@code "Nothing imported"} when the results are empty. @param results the resolution results @return the message describing the imports
java
core/spring-boot/src/main/java/org/springframework/boot/context/config/ConfigDataEnvironmentContributors.java
142
[ "results" ]
CharSequence
true
3
7.28
spring-projects/spring-boot
79,428
javadoc
false
create_iter_data_given_by
def create_iter_data_given_by( data: DataFrame, kind: str = "hist" ) -> dict[Hashable, DataFrame | Series]: """ Create data for iteration given `by` is assigned or not, and it is only used in both hist and boxplot. If `by` is assigned, return a dictionary of DataFrames in which the key of dictionary is the values in groups. If `by` is not assigned, return input as is, and this preserves current status of iter_data. Parameters ---------- data : reformatted grouped data from `_compute_plot_data` method. kind : str, plot kind. This function is only used for `hist` and `box` plots. Returns ------- iter_data : DataFrame or Dictionary of DataFrames Examples -------- If `by` is assigned: >>> import numpy as np >>> tuples = [("h1", "a"), ("h1", "b"), ("h2", "a"), ("h2", "b")] >>> mi = pd.MultiIndex.from_tuples(tuples) >>> value = [[1, 3, np.nan, np.nan], [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]] >>> data = pd.DataFrame(value, columns=mi) >>> create_iter_data_given_by(data) {'h1': h1 a b 0 1.0 3.0 1 3.0 4.0 2 NaN NaN, 'h2': h2 a b 0 NaN NaN 1 NaN NaN 2 5.0 6.0} """ # For `hist` plot, before transformation, the values in level 0 are values # in groups and subplot titles, and later used for column subselection and # iteration; For `box` plot, values in level 1 are column names to show, # and are used for iteration and as subplots titles. if kind == "hist": level = 0 else: level = 1 # Select sub-columns based on the value of level of MI, and if `by` is # assigned, data must be a MI DataFrame assert isinstance(data.columns, MultiIndex) return { col: data.loc[:, data.columns.get_level_values(level) == col] for col in data.columns.levels[level] }
Create data for iteration given `by` is assigned or not, and it is only used in both hist and boxplot. If `by` is assigned, return a dictionary of DataFrames in which the key of dictionary is the values in groups. If `by` is not assigned, return input as is, and this preserves current status of iter_data. Parameters ---------- data : reformatted grouped data from `_compute_plot_data` method. kind : str, plot kind. This function is only used for `hist` and `box` plots. Returns ------- iter_data : DataFrame or Dictionary of DataFrames Examples -------- If `by` is assigned: >>> import numpy as np >>> tuples = [("h1", "a"), ("h1", "b"), ("h2", "a"), ("h2", "b")] >>> mi = pd.MultiIndex.from_tuples(tuples) >>> value = [[1, 3, np.nan, np.nan], [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]] >>> data = pd.DataFrame(value, columns=mi) >>> create_iter_data_given_by(data) {'h1': h1 a b 0 1.0 3.0 1 3.0 4.0 2 NaN NaN, 'h2': h2 a b 0 NaN NaN 1 NaN NaN 2 5.0 6.0}
python
pandas/plotting/_matplotlib/groupby.py
27
[ "data", "kind" ]
dict[Hashable, DataFrame | Series]
true
3
8.56
pandas-dev/pandas
47,362
numpy
false
min
@GwtIncompatible( "Available in GWT! Annotation is to avoid conflict with GWT specialization of base class.") public static double min(double... array) { checkArgument(array.length > 0); double min = array[0]; for (int i = 1; i < array.length; i++) { min = Math.min(min, array[i]); } return min; }
Returns the least value present in {@code array}, using the same rules of comparison as {@link Math#min(double, double)}. @param array a <i>nonempty</i> array of {@code double} values @return the value present in {@code array} that is less than or equal to every other value in the array @throws IllegalArgumentException if {@code array} is empty
java
android/guava/src/com/google/common/primitives/Doubles.java
209
[]
true
2
7.6
google/guava
51,352
javadoc
false
compare
public abstract int compare(String str1, String str2);
Compare two Strings lexicographically, like {@link String#compareTo(String)}. <p> The return values are: </p> <ul> <li>{@code int = 0}, if {@code str1} is equal to {@code str2} (or both {@code null})</li> <li>{@code int < 0}, if {@code str1} is less than {@code str2}</li> <li>{@code int > 0}, if {@code str1} is greater than {@code str2}</li> </ul> <p> This is a {@code null} safe version of : </p> <pre> str1.compareTo(str2) </pre> <p> {@code null} value is considered less than non-{@code null} value. Two {@code null} references are considered equal. </p> <p> Case-sensitive examples </p> <pre>{@code Strings.CS.compare(null, null) = 0 Strings.CS.compare(null , "a") < 0 Strings.CS.compare("a", null) > 0 Strings.CS.compare("abc", "abc") = 0 Strings.CS.compare("a", "b") < 0 Strings.CS.compare("b", "a") > 0 Strings.CS.compare("a", "B") > 0 Strings.CS.compare("ab", "abc") < 0 }</pre> <p> Case-insensitive examples </p> <pre>{@code Strings.CI.compare(null, null) = 0 Strings.CI.compare(null , "a") < 0 Strings.CI.compare("a", null) > 0 Strings.CI.compare("abc", "abc") = 0 Strings.CI.compare("abc", "ABC") = 0 Strings.CI.compare("a", "b") < 0 Strings.CI.compare("b", "a") > 0 Strings.CI.compare("a", "B") < 0 Strings.CI.compare("A", "b") < 0 Strings.CI.compare("ab", "ABC") < 0 }</pre> @see String#compareTo(String) @param str1 the String to compare from @param str2 the String to compare to @return &lt; 0, 0, &gt; 0, if {@code str1} is respectively less, equal or greater than {@code str2}
java
src/main/java/org/apache/commons/lang3/Strings.java
477
[ "str1", "str2" ]
true
1
6.32
apache/commons-lang
2,896
javadoc
false
workOnQueue
@SuppressWarnings("CatchingUnchecked") // sneaky checked exception private void workOnQueue() { boolean interruptedDuringTask = false; boolean hasSetRunning = false; try { while (true) { synchronized (queue) { // Choose whether this thread will run or not after acquiring the lock on the first // iteration if (!hasSetRunning) { if (workerRunningState == RUNNING) { // Don't want to have two workers pulling from the queue. return; } else { // Increment the run counter to avoid the ABA problem of a submitter marking the // thread as QUEUED after it already ran and exhausted the queue before returning // from execute(). workerRunCount++; workerRunningState = RUNNING; hasSetRunning = true; } } task = queue.poll(); if (task == null) { workerRunningState = IDLE; return; } } // Remove the interrupt bit before each task. The interrupt is for the "current task" when // it is sent, so subsequent tasks in the queue should not be caused to be interrupted // by a previous one in the queue being interrupted. interruptedDuringTask |= Thread.interrupted(); try { task.run(); } catch (Exception e) { // sneaky checked exception log.get().log(Level.SEVERE, "Exception while executing runnable " + task, e); } finally { task = null; } } } finally { // Ensure that if the thread was interrupted at all while processing the task queue, it // is returned to the delegate Executor interrupted so that it may handle the // interruption if it likes. if (interruptedDuringTask) { Thread.currentThread().interrupt(); } } }
Continues executing tasks from {@link #queue} until it is empty. <p>The thread's interrupt bit is cleared before execution of each task. <p>If the Thread in use is interrupted before or during execution of the tasks in {@link #queue}, the Executor will complete its tasks, and then restore the interruption. This means that once the Thread returns to the Executor that this Executor composes, the interruption will still be present. If the composed Executor is an ExecutorService, it can respond to shutdown() by returning tasks queued on that Thread after {@link #worker} drains the queue.
java
android/guava/src/com/google/common/util/concurrent/SequentialExecutor.java
206
[]
void
true
7
6.88
google/guava
51,352
javadoc
false
updatePatternSubscription
private void updatePatternSubscription(MembershipManagerShim membershipManager, Cluster cluster) { final Set<String> topicsToSubscribe = cluster.topics().stream() .filter(subscriptions::matchesSubscribedPattern) .collect(Collectors.toSet()); if (subscriptions.subscribeFromPattern(topicsToSubscribe)) { this.metadataVersionSnapshot = metadata.requestUpdateForNewTopics(); } // Join the group if not already part of it, or just send the updated subscription // to the broker on the next poll. Note that this is done even if no topics matched // the regex, to ensure the member joins the group if needed (with empty subscription). membershipManager.onSubscriptionUpdated(); }
This function evaluates the regex that the consumer subscribed to against the list of topic names from metadata, and updates the list of topics in subscription state accordingly @param membershipManager Membership manager to notify so the updated subscription is sent to the broker @param cluster Cluster from which we get the topics
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java
825
[ "membershipManager", "cluster" ]
void
true
2
6.4
apache/kafka
31,560
javadoc
false
get_current_task_execution_arn
def get_current_task_execution_arn(self, task_arn: str) -> str | None: """ Get current TaskExecutionArn (if one exists) for the specified ``task_arn``. :param task_arn: TaskArn :return: CurrentTaskExecutionArn for this ``task_arn`` or None. :raises AirflowBadRequest: if ``task_arn`` is empty. """ if not task_arn: raise AirflowBadRequest("task_arn not specified") task_description = self.get_task_description(task_arn) if "CurrentTaskExecutionArn" in task_description: return task_description["CurrentTaskExecutionArn"] return None
Get current TaskExecutionArn (if one exists) for the specified ``task_arn``. :param task_arn: TaskArn :return: CurrentTaskExecutionArn for this ``task_arn`` or None. :raises AirflowBadRequest: if ``task_arn`` is empty.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/datasync.py
280
[ "self", "task_arn" ]
str | None
true
3
7.6
apache/airflow
43,597
sphinx
false
handleFetchFailure
protected void handleFetchFailure(final Node fetchTarget, final FetchSessionHandler.FetchRequestData data, final Throwable t) { try { final FetchSessionHandler handler = sessionHandler(fetchTarget.id()); if (handler != null) { handler.handleError(t); handler.sessionTopicPartitions().forEach(subscriptions::clearPreferredReadReplica); } } finally { removePendingFetchRequest(fetchTarget, data.metadata().sessionId()); } }
Implements the core logic for a failed fetch response. @param fetchTarget {@link Node} from which the fetch data was requested @param data {@link FetchSessionHandler.FetchRequestData} from request @param t {@link Throwable} representing the error that resulted in the failure
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
266
[ "fetchTarget", "data", "t" ]
void
true
2
6.24
apache/kafka
31,560
javadoc
false
copyTo
@CanIgnoreReturnValue public long copyTo(CharSink sink) throws IOException { checkNotNull(sink); Closer closer = Closer.create(); try { Reader reader = closer.register(openStream()); Writer writer = closer.register(sink.openStream()); return CharStreams.copy(reader, writer); } catch (Throwable e) { throw closer.rethrow(e); } finally { closer.close(); } }
Copies the contents of this source to the given sink. @return the number of characters copied @throws IOException if an I/O error occurs while reading from this source or writing to {@code sink}
java
android/guava/src/com/google/common/io/CharSource.java
271
[ "sink" ]
true
2
6.72
google/guava
51,352
javadoc
false
_maybe_coerce_freq
def _maybe_coerce_freq(code) -> str:
    """we might need to coerce a code to a rule_code and uppercase it

    Parameters
    ----------
    code : str or DateOffset
        Frequency converting from

    Returns
    -------
    str
    """
    assert code is not None
    if isinstance(code, DateOffset):
        code = PeriodDtype(to_offset(code.name))._freqstr
    if code in {"h", "min", "s", "ms", "us", "ns"}:
        return code
    else:
        return code.upper()
we might need to coerce a code to a rule_code and uppercase it Parameters ---------- code : str or DateOffset Frequency converting from Returns ------- str
python
pandas/tseries/frequencies.py
568
[ "code" ]
str
true
4
6.08
pandas-dev/pandas
47,362
numpy
false
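Concretely, _maybe_coerce_freq passes sub-daily codes through in lowercase and uppercases everything else. A usage sketch; this is a private pandas helper, so importing it is an assumption that may break across versions:

from pandas.tseries.frequencies import _maybe_coerce_freq  # private helper

assert _maybe_coerce_freq("min") == "min"  # sub-daily codes pass through unchanged
assert _maybe_coerce_freq("d") == "D"      # other codes are uppercased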
count
int count(@CompatibleWith("E") @Nullable Object element);
Returns the number of occurrences of an element in this multiset (the <i>count</i> of the element). Note that for an {@link Object#equals}-based multiset, this gives the same result as {@link Collections#frequency} (which would presumably perform more poorly). <p><b>Note:</b> the utility method {@link Iterables#frequency} generalizes this operation; it correctly delegates to this method when dealing with a multiset, but it can also accept any other iterable type. @param element the element to count occurrences of @return the number of occurrences of the element in this multiset; possibly zero but never negative
java
android/guava/src/com/google/common/collect/Multiset.java
115
[ "element" ]
true
1
6.32
google/guava
51,352
javadoc
false
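Python's collections.Counter is the closest analogue to Multiset.count: indexing returns the occurrence count and, matching the contract above, is zero (never negative) for absent elements:

from collections import Counter

multiset = Counter(["a", "a", "b"])
assert multiset["a"] == 2
assert multiset["missing"] == 0  # absent elements count as zero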
startAsync
@CanIgnoreReturnValue Service startAsync();
If the service state is {@link State#NEW}, this initiates service startup and returns immediately. A stopped service may not be restarted. @return this @throws IllegalStateException if the service is not {@link State#NEW} @since 15.0
java
android/guava/src/com/google/common/util/concurrent/Service.java
67
[]
Service
true
1
6.48
google/guava
51,352
javadoc
false
createPropertyDescriptor
PropertyDescriptor createPropertyDescriptor(String name, PropertyDescriptor propertyDescriptor) {
	String key = ConventionUtils.toDashedCase(name);
	if (this.items.containsKey(key)) {
		ItemMetadata itemMetadata = this.items.get(key);
		ItemHint itemHint = this.hints.get(key);
		return new SourcePropertyDescriptor(propertyDescriptor, itemMetadata, itemHint);
	}
	return propertyDescriptor;
}
Create a {@link PropertyDescriptor} for the given property name.

@param name the name of a property
@param propertyDescriptor the descriptor of the property
@return a property descriptor that applies additional source metadata if necessary
java
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/ConfigurationPropertiesSourceResolver.java
106
[ "name", "propertyDescriptor" ]
PropertyDescriptor
true
2
7.28
spring-projects/spring-boot
79,428
javadoc
false
after
function after(n, func) {
  if (typeof func != 'function') {
    throw new TypeError(FUNC_ERROR_TEXT);
  }
  n = toInteger(n);
  return function() {
    if (--n < 1) {
      return func.apply(this, arguments);
    }
  };
}
The opposite of `_.before`; this method creates a function that invokes
`func` once it's called `n` or more times.

@static
@memberOf _
@since 0.1.0
@category Function
@param {number} n The number of calls before `func` is invoked.
@param {Function} func The function to restrict.
@returns {Function} Returns the new restricted function.
@example

var saves = ['profile', 'settings'];

var done = _.after(saves.length, function() {
  console.log('done saving!');
});

_.forEach(saves, function(type) {
  asyncSave({ 'type': type, 'complete': done });
});
// => Logs 'done saving!' after the two async saves have completed.
javascript
lodash.js
10,097
[ "n", "func" ]
false
3
7.68
lodash/lodash
61,490
jsdoc
false
estimate_bandwidth
def estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None):
    """Estimate the bandwidth to use with the mean-shift algorithm.

    This function takes time at least quadratic in `n_samples`. For large
    datasets, it is wise to subsample by setting `n_samples`. Alternatively,
    the parameter `bandwidth` can be set to a small value without estimating
    it.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input points.

    quantile : float, default=0.3
        Should be between [0, 1]
        0.5 means that the median of all pairwise distances is used.

    n_samples : int, default=None
        The number of samples to use. If not given, all samples are used.

    random_state : int, RandomState instance, default=0
        The generator used to randomly select the samples from input points
        for bandwidth estimation. Use an int to make the randomness
        deterministic.
        See :term:`Glossary <random_state>`.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    bandwidth : float
        The bandwidth parameter.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cluster import estimate_bandwidth
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> estimate_bandwidth(X, quantile=0.5)
    np.float64(1.61)
    """
    X = check_array(X)

    random_state = check_random_state(random_state)
    if n_samples is not None:
        idx = random_state.permutation(X.shape[0])[:n_samples]
        X = X[idx]
    n_neighbors = int(X.shape[0] * quantile)
    if n_neighbors < 1:  # cannot fit NearestNeighbors with n_neighbors = 0
        n_neighbors = 1
    nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)
    nbrs.fit(X)

    bandwidth = 0.0
    for batch in gen_batches(len(X), 500):
        d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
        bandwidth += np.max(d, axis=1).sum()

    return bandwidth / X.shape[0]
Estimate the bandwidth to use with the mean-shift algorithm.

This function takes time at least quadratic in `n_samples`. For large
datasets, it is wise to subsample by setting `n_samples`. Alternatively,
the parameter `bandwidth` can be set to a small value without estimating
it.

Parameters
----------
X : array-like of shape (n_samples, n_features)
    Input points.

quantile : float, default=0.3
    Should be between [0, 1]
    0.5 means that the median of all pairwise distances is used.

n_samples : int, default=None
    The number of samples to use. If not given, all samples are used.

random_state : int, RandomState instance, default=0
    The generator used to randomly select the samples from input points
    for bandwidth estimation. Use an int to make the randomness
    deterministic.
    See :term:`Glossary <random_state>`.

n_jobs : int, default=None
    The number of parallel jobs to run for neighbors search.
    ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
    ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
    for more details.

Returns
-------
bandwidth : float
    The bandwidth parameter.

Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import estimate_bandwidth
>>> X = np.array([[1, 1], [2, 1], [1, 0],
...               [4, 7], [3, 5], [3, 6]])
>>> estimate_bandwidth(X, quantile=0.5)
np.float64(1.61)
python
sklearn/cluster/_mean_shift.py
41
[ "X", "quantile", "n_samples", "random_state", "n_jobs" ]
false
4
7.44
scikit-learn/scikit-learn
64,340
numpy
false
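The estimated bandwidth is typically passed straight to MeanShift rather than used on its own. A short sketch with the public scikit-learn API (the toy data mirrors the docstring example):

# Feed the estimated bandwidth into MeanShift instead of hand-tuning it.
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth

X = np.array([[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]])
bandwidth = estimate_bandwidth(X, quantile=0.5)
labels = MeanShift(bandwidth=bandwidth).fit_predict(X)
print(labels)  # two clusters expected for this toy data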
noneLookup
public static StrLookup<?> noneLookup() {
    return NONE_LOOKUP;
}
Returns a lookup which always returns null.

@return a lookup that always returns null, not null.
java
src/main/java/org/apache/commons/lang3/text/StrLookup.java
130
[]
true
1
6.96
apache/commons-lang
2,896
javadoc
false