Schema (15 fields per record, in the order they appear below; ranges are as reported by the dataset viewer):

- function_name: string, length 1 to 57
- function_code: string, length 20 to 4.99k
- documentation: string, length 50 to 2k
- language: string, 5 classes
- file_path: string, length 8 to 166
- line_number: int32, 4 to 16.7k
- parameters: list, length 0 to 20
- return_type: string, length 0 to 131 (omitted from a record when empty)
- has_type_hints: bool, 2 classes
- complexity: int32, 1 to 51
- quality_score: float32, 6 to 9.68
- repo_name: string, 34 classes
- repo_stars: int32, 2.9k to 242k
- docstring_style: string, 7 classes
- is_async: bool, 2 classes
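Each record below lists these fields one per line, in schema order. As a minimal sketch of how records with this schema could be loaded and queried, assuming the dump comes from a Hugging Face-style dataset; the path "user/code-docs" is a placeholder, not the real identifier:

```python
# Minimal sketch: load and filter records with the schema above.
# Assumption: "user/code-docs" is a placeholder dataset path.
from datasets import load_dataset

ds = load_dataset("user/code-docs", split="train")

# Keep well-documented, low-complexity Python functions.
subset = ds.filter(
    lambda row: row["language"] == "python"
    and row["quality_score"] >= 7.0
    and row["complexity"] <= 5
)

for row in subset.select(range(3)):
    print(row["function_name"], row["repo_name"], row["quality_score"])
```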
indexOf
@Override public int indexOf(Advisor advisor) { Assert.notNull(advisor, "Advisor must not be null"); return this.advisors.indexOf(advisor); }
Remove a proxied interface. <p>Does nothing if the given interface isn't proxied. @param ifc the interface to remove from the proxy @return {@code true} if the interface was removed; {@code false} if the interface was not found and hence could not be removed
java
spring-aop/src/main/java/org/springframework/aop/framework/AdvisedSupport.java
348
[ "advisor" ]
true
1
7.04
spring-projects/spring-framework
59,386
javadoc
false
tryUpdatingPreferredReadReplica
public synchronized boolean tryUpdatingPreferredReadReplica(TopicPartition tp, int preferredReadReplicaId, LongSupplier timeMs) { final TopicPartitionState state = assignedStateOrNull(tp); if (state != null) { assignedState(tp).updatePreferredReadReplica(preferredReadReplicaId, timeMs); return true; } return false; }
Tries to set the preferred read replica with a lease timeout. After this time, the replica will no longer be valid and {@link #preferredReadReplica(TopicPartition, long)} will return an empty result. If the preferred replica of the partition could not be updated (e.g. because the partition is not assigned) this method will return {@code false}, otherwise it will return {@code true}. @param tp The topic partition @param preferredReadReplicaId The preferred read replica @param timeMs The time at which this preferred replica is no longer valid @return {@code true} if the preferred read replica was updated, {@code false} otherwise.
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java
733
[ "tp", "preferredReadReplicaId", "timeMs" ]
true
2
7.92
apache/kafka
31,560
javadoc
false
baseHasIn
function baseHasIn(object, key) { return object != null && key in Object(object); }
The base implementation of `_.hasIn` without support for deep paths. @private @param {Object} [object] The object to query. @param {Array|string} key The key to check. @returns {boolean} Returns `true` if `key` exists, else `false`.
javascript
lodash.js
3,147
[ "object", "key" ]
false
2
6
lodash/lodash
61,490
jsdoc
false
default_dtypes
def default_dtypes(self, *, device=None): """ The default data types used for new PyTorch arrays. Parameters ---------- device : Device, optional The device to get the default data types for. Unused for PyTorch, as all devices use the same default dtypes. Returns ------- dtypes : dict A dictionary describing the default data types used for new PyTorch arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_dtypes() {'real floating': torch.float32, 'complex floating': torch.complex64, 'integral': torch.int64, 'indexing': torch.int64} """ # Note: if the default is set to float64, the devices like MPS that # don't support float64 will error. We still return the default_dtype # value here because this error doesn't represent a different default # per-device. default_floating = torch.get_default_dtype() default_complex = torch.complex64 if default_floating == torch.float32 else torch.complex128 default_integral = torch.int64 return { "real floating": default_floating, "complex floating": default_complex, "integral": default_integral, "indexing": default_integral, }
The default data types used for new PyTorch arrays. Parameters ---------- device : Device, optional The device to get the default data types for. Unused for PyTorch, as all devices use the same default dtypes. Returns ------- dtypes : dict A dictionary describing the default data types used for new PyTorch arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_dtypes() {'real floating': torch.float32, 'complex floating': torch.complex64, 'integral': torch.int64, 'indexing': torch.int64}
python
sklearn/externals/array_api_compat/torch/_info.py
126
[ "self", "device" ]
false
2
7.2
scikit-learn/scikit-learn
64,340
numpy
false
getKeyParameters
public CacheInvocationParameter[] getKeyParameters(@Nullable Object... values) { List<CacheInvocationParameter> result = new ArrayList<>(); for (CacheParameterDetail keyParameterDetail : this.keyParameterDetails) { int parameterPosition = keyParameterDetail.getParameterPosition(); if (parameterPosition >= values.length) { throw new IllegalStateException("Values mismatch, key parameter at position " + parameterPosition + " cannot be matched against " + values.length + " value(s)"); } result.add(keyParameterDetail.toCacheInvocationParameter(values[parameterPosition])); } return result.toArray(new CacheInvocationParameter[0]); }
Return the {@link CacheInvocationParameter} for the parameters that are to be used to compute the key. <p>Per the spec, if some method parameters are annotated with {@link javax.cache.annotation.CacheKey}, only those parameters should be part of the key. If none are annotated, all parameters except the parameter annotated with {@link javax.cache.annotation.CacheValue} should be part of the key. <p>The method arguments must match the signature of the related method invocation @param values the parameters value for a particular invocation @return the {@link CacheInvocationParameter} instances for the parameters to be used to compute the key
java
spring-context-support/src/main/java/org/springframework/cache/jcache/interceptor/AbstractJCacheKeyOperation.java
79
[]
true
2
7.44
spring-projects/spring-framework
59,386
javadoc
false
get_params
def get_params(self, deep=True): """Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values. """ params = dict() # introspect the constructor arguments to find the model parameters # to represent cls = self.__class__ init = getattr(cls.__init__, "deprecated_original", cls.__init__) init_sign = signature(init) args, varargs = [], [] for parameter in init_sign.parameters.values(): if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self": args.append(parameter.name) if parameter.kind == parameter.VAR_POSITIONAL: varargs.append(parameter.name) if len(varargs) != 0: raise RuntimeError( "scikit-learn kernels should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s doesn't follow this convention." % (cls,) ) for arg in args: params[arg] = getattr(self, arg) return params
Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values.
python
sklearn/gaussian_process/kernels.py
178
[ "self", "deep" ]
false
7
6.08
scikit-learn/scikit-learn
64,340
numpy
false
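The get_params entry above works by introspecting the constructor signature. A standalone sketch of that pattern, using a hypothetical Kernel class rather than scikit-learn's actual base class:

```python
# Sketch of the signature-introspection pattern used by get_params above.
# The Kernel class here is hypothetical, for illustration only.
from inspect import signature

class Kernel:
    def __init__(self, length_scale=1.0, nu=1.5):
        self.length_scale = length_scale
        self.nu = nu

    def get_params(self):
        init = type(self).__init__
        names = [
            p.name
            for p in signature(init).parameters.values()
            if p.name != "self" and p.kind != p.VAR_KEYWORD
        ]
        return {name: getattr(self, name) for name in names}

print(Kernel(nu=2.5).get_params())  # {'length_scale': 1.0, 'nu': 2.5}
```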
put
public JSONArray put(boolean value) { this.values.add(value); return this; }
Appends {@code value} to the end of this array. @param value the value @return this array.
java
cli/spring-boot-cli/src/json-shade/java/org/springframework/boot/cli/json/JSONArray.java
133
[ "value" ]
JSONArray
true
1
6.96
spring-projects/spring-boot
79,428
javadoc
false
getOrder
public int getOrder(String beanName, Object beanInstance) { OrderComparator comparator = (getDependencyComparator() instanceof OrderComparator orderComparator ? orderComparator : OrderComparator.INSTANCE); return comparator.getOrder(beanInstance, new FactoryAwareOrderSourceProvider(Collections.singletonMap(beanInstance, beanName))); }
Public method to determine the applicable order value for a given bean. @param beanName the name of the bean @param beanInstance the bean instance to check @return the corresponding order value (default is {@link Ordered#LOWEST_PRECEDENCE}) @since 7.0 @see #getOrder(String)
java
spring-beans/src/main/java/org/springframework/beans/factory/support/DefaultListableBeanFactory.java
2,375
[ "beanName", "beanInstance" ]
true
2
7.44
spring-projects/spring-framework
59,386
javadoc
false
humanReadable
public boolean humanReadable() { return this.humanReadable; }
@return the value of the "human readable" flag. When the value is equal to true, some types of values are written in a format easier to read for a human.
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java
323
[]
true
1
6.96
elastic/elasticsearch
75,680
javadoc
false
matches
public static boolean matches(MethodMatcher mm, Method method, Class<?> targetClass, boolean hasIntroductions) { Assert.notNull(mm, "MethodMatcher must not be null"); return (mm instanceof IntroductionAwareMethodMatcher iamm ? iamm.matches(method, targetClass, hasIntroductions) : mm.matches(method, targetClass)); }
Apply the given MethodMatcher to the given Method, supporting an {@link org.springframework.aop.IntroductionAwareMethodMatcher} (if applicable). @param mm the MethodMatcher to apply (may be an IntroductionAwareMethodMatcher) @param method the candidate method @param targetClass the target class @param hasIntroductions {@code true} if the object on whose behalf we are asking is the subject on one or more introductions; {@code false} otherwise @return whether this method matches statically
java
spring-aop/src/main/java/org/springframework/aop/support/MethodMatchers.java
109
[ "mm", "method", "targetClass", "hasIntroductions" ]
true
2
7.44
spring-projects/spring-framework
59,386
javadoc
false
_nan_mask
def _nan_mask(a, out=None): """ Parameters ---------- a : array-like Input array with at least 1 dimension. out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the expected output and will prevent the allocation of a new array. Returns ------- y : bool ndarray or True A bool array where ``np.nan`` positions are marked with ``False`` and other positions are marked with ``True``. If the type of ``a`` is such that it can't possibly contain ``np.nan``, returns ``True``. """ # we assume that a is an array for this private function if a.dtype.kind not in 'fc': return True y = np.isnan(a, out=out) y = np.invert(y, out=y) return y
Parameters ---------- a : array-like Input array with at least 1 dimension. out : ndarray, optional Alternate output array in which to place the result. The default is ``None``; if provided, it must have the same shape as the expected output and will prevent the allocation of a new array. Returns ------- y : bool ndarray or True A bool array where ``np.nan`` positions are marked with ``False`` and other positions are marked with ``True``. If the type of ``a`` is such that it can't possibly contain ``np.nan``, returns ``True``.
python
numpy/lib/_nanfunctions_impl.py
43
[ "a", "out" ]
false
2
6.24
numpy/numpy
31,054
numpy
false
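_nan_mask above is a private NumPy helper; its documented behavior can be reproduced with public NumPy calls, as in this sketch:

```python
# Reproduces the documented behavior of the private _nan_mask helper
# using only public NumPy APIs.
import numpy as np

a = np.array([1.0, np.nan, 3.0])
if a.dtype.kind in "fc":             # only float/complex dtypes can hold NaN
    mask = np.invert(np.isnan(a))    # False at NaN positions, True elsewhere
else:
    mask = True                      # dtype cannot contain NaN at all
print(mask)  # [ True False  True]
```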
meanBy
function meanBy(array, iteratee) { return baseMean(array, getIteratee(iteratee, 2)); }
This method is like `_.mean` except that it accepts `iteratee` which is invoked for each element in `array` to generate the value to be averaged. The iteratee is invoked with one argument: (value). @static @memberOf _ @since 4.7.0 @category Math @param {Array} array The array to iterate over. @param {Function} [iteratee=_.identity] The iteratee invoked per element. @returns {number} Returns the mean. @example var objects = [{ 'n': 4 }, { 'n': 2 }, { 'n': 8 }, { 'n': 6 }]; _.meanBy(objects, function(o) { return o.n; }); // => 5 // The `_.property` iteratee shorthand. _.meanBy(objects, 'n'); // => 5
javascript
lodash.js
16,491
[ "array", "iteratee" ]
false
1
6.24
lodash/lodash
61,490
jsdoc
false
toString
@Override public String toString() { ToStringCreator creator = new ToStringCreator(this); creator.append("type", this.type); creator.append("value", (this.value != null) ? "provided" : "none"); creator.append("annotations", this.annotations); creator.append("bindMethod", this.bindMethod); return creator.toString(); }
Returns the {@link BindMethod method} to be used to bind this bindable, or {@code null} if no specific binding method is required. @return the bind method or {@code null} @since 3.0.8
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/bind/Bindable.java
164
[]
String
true
2
8.08
spring-projects/spring-boot
79,428
javadoc
false
check_for_bucket
def check_for_bucket(self, bucket_name: str | None = None) -> bool: """ Check if bucket_name exists. .. seealso:: - :external+boto3:py:meth:`S3.Client.head_bucket` :param bucket_name: the name of the bucket :return: True if it exists and False if not. """ try: self.get_conn().head_bucket(Bucket=bucket_name) return True except ClientError as e: # The head_bucket api is odd in that it cannot return proper # exception objects, so error codes must be used. Only 200, 404 and 403 # are ever returned. See the following links for more details: # https://github.com/boto/boto3/issues/2499 # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.head_bucket return_code = int(e.response["Error"]["Code"]) if return_code == 404: self.log.info('Bucket "%s" does not exist', bucket_name) elif return_code == 403: self.log.error( 'Access to bucket "%s" is forbidden or there was an error with the request', bucket_name, ) self.log.error(e) return False
Check if bucket_name exists. .. seealso:: - :external+boto3:py:meth:`S3.Client.head_bucket` :param bucket_name: the name of the bucket :return: True if it exists and False if not.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
296
[ "self", "bucket_name" ]
bool
true
3
7.76
apache/airflow
43,597
sphinx
false
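A usage sketch for the check_for_bucket hook method above; it assumes an Airflow environment with a configured "aws_default" connection, and the bucket name is a placeholder:

```python
# Usage sketch for S3Hook.check_for_bucket; the connection id and bucket
# name are assumptions, not values from the dataset.
from airflow.providers.amazon.aws.hooks.s3 import S3Hook

hook = S3Hook(aws_conn_id="aws_default")
if hook.check_for_bucket("my-bucket"):
    print("bucket exists and is accessible")
```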
create_multiarch_bundle
def create_multiarch_bundle(code_objects: dict, output_bundle_path: str) -> bool: """ Bundle multiple architecture code objects into a single multi-arch bundle. Uses clang-offload-bundler to create a fat binary that HIP runtime can load. The runtime automatically selects the correct architecture at load time. Args: code_objects: Dict mapping architecture to code object path output_bundle_path: Path for output bundle Returns: True if successful """ if not code_objects: return False os.makedirs(os.path.dirname(output_bundle_path), exist_ok=True) try: bundler = get_rocm_bundler() except RuntimeError: return False # Build targets and inputs lists for clang-offload-bundler targets = ["host-x86_64-unknown-linux-gnu"] # We include a dummy host entry to satisfy the bundler format inputs = ["/dev/null"] for arch, path in sorted(code_objects.items()): if not os.path.exists(path): continue # hipv4 = HIP version 4 code object format # amdgcn-amd-amdhsa = target triple for ROCm/HSA runtime # arch = specific GPU (gfx90a, gfx942, etc.) targets.append(f"hipv4-amdgcn-amd-amdhsa--{arch}") inputs.append(path) if len(inputs) == 1: # Only host, no device code return False cmd = [ bundler, "--type=o", # CRITICAL: HIP runtime expects 4096-byte alignment for loading bundles # Without this, hipModuleLoadData gives segmentation fault "-bundle-align=4096", # CRITICAL: Required by HIP runtime! f"--targets={','.join(targets)}", ] for input_file in inputs: cmd.append(f"--input={input_file}") cmd.append(f"--output={output_bundle_path}") try: subprocess.run(cmd, capture_output=True, text=True, check=True) if not os.path.exists(output_bundle_path): return False return True except subprocess.CalledProcessError: return False
Bundle multiple architecture code objects into a single multi-arch bundle. Uses clang-offload-bundler to create a fat binary that HIP runtime can load. The runtime automatically selects the correct architecture at load time. Args: code_objects: Dict mapping architecture to code object path output_bundle_path: Path for output bundle Returns: True if successful
python
torch/_inductor/rocm_multiarch_utils.py
147
[ "code_objects", "output_bundle_path" ]
bool
true
7
7.6
pytorch/pytorch
96,034
google
false
load_file
def load_file( self, filename: Path | str, key: str, bucket_name: str | None = None, replace: bool = False, encrypt: bool = False, gzip: bool = False, acl_policy: str | None = None, ) -> None: """ Load a local file to S3. .. seealso:: - :external+boto3:py:meth:`S3.Client.upload_file` :param filename: path to the file to load. :param key: S3 key that will point to the file :param bucket_name: Name of the bucket in which to store the file :param replace: A flag to decide whether or not to overwrite the key if it already exists. If replace is False and the key exists, an error will be raised. :param encrypt: If True, the file will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :param gzip: If True, the file will be compressed locally :param acl_policy: String specifying the canned ACL policy for the file being uploaded to the S3 bucket. """ filename = str(filename) if not replace and self.check_for_key(key, bucket_name): raise ValueError(f"The key {key} already exists.") extra_args = self.extra_args if encrypt: extra_args["ServerSideEncryption"] = "AES256" if gzip: with open(filename, "rb") as f_in: filename_gz = f"{f_in.name}.gz" with gz.open(filename_gz, "wb") as f_out: shutil.copyfileobj(f_in, f_out) filename = filename_gz if acl_policy: extra_args["ACL"] = acl_policy if self._requester_pays: extra_args["RequestPayer"] = "requester" client = self.get_conn() client.upload_file( filename, bucket_name, key, ExtraArgs=extra_args, Config=self.transfer_config, ) get_hook_lineage_collector().add_input_asset( context=self, scheme="file", asset_kwargs={"path": filename} ) get_hook_lineage_collector().add_output_asset( context=self, scheme="s3", asset_kwargs={"bucket": bucket_name, "key": key} )
Load a local file to S3. .. seealso:: - :external+boto3:py:meth:`S3.Client.upload_file` :param filename: path to the file to load. :param key: S3 key that will point to the file :param bucket_name: Name of the bucket in which to store the file :param replace: A flag to decide whether or not to overwrite the key if it already exists. If replace is False and the key exists, an error will be raised. :param encrypt: If True, the file will be encrypted on the server-side by S3 and will be stored in an encrypted form while at rest in S3. :param gzip: If True, the file will be compressed locally :param acl_policy: String specifying the canned ACL policy for the file being uploaded to the S3 bucket.
python
providers/amazon/src/airflow/providers/amazon/aws/hooks/s3.py
1,172
[ "self", "filename", "key", "bucket_name", "replace", "encrypt", "gzip", "acl_policy" ]
None
true
7
6.8
apache/airflow
43,597
sphinx
false
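A usage sketch for the load_file hook method above, under the same Airflow connection assumption; the local path, key, and bucket name are placeholders:

```python
# Usage sketch for S3Hook.load_file; all names here are placeholders.
from airflow.providers.amazon.aws.hooks.s3 import S3Hook

hook = S3Hook(aws_conn_id="aws_default")
hook.load_file(
    filename="/tmp/report.csv",
    key="reports/report.csv",
    bucket_name="my-bucket",
    replace=True,  # overwrite the key if it already exists
)
```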
get_variable
def get_variable(self, key: str, team_name: str | None = None) -> str | None: """ Get Airflow Variable. :param key: Variable Key :param team_name: Team name associated to the task trying to access the variable (if any) :return: Variable Value """ if self.variables_prefix is None: return None return self._get_secret(self.variables_prefix, key, self.variables_lookup_pattern)
Get Airflow Variable. :param key: Variable Key :param team_name: Team name associated to the task trying to access the variable (if any) :return: Variable Value
python
providers/amazon/src/airflow/providers/amazon/aws/secrets/secrets_manager.py
228
[ "self", "key", "team_name" ]
str | None
true
2
7.92
apache/airflow
43,597
sphinx
false
toString
@Override public String toString() { try { return this.file.getCanonicalPath(); } catch (IOException ex) { throw new IllegalStateException(ex); } }
Create a new {@link ProcessBuilder} that will run with the Java executable. @param arguments the command arguments @return a {@link ProcessBuilder}
java
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/JavaExecutable.java
61
[]
String
true
2
7.44
spring-projects/spring-boot
79,428
javadoc
false
create
public static <T extends @Nullable Object> BloomFilter<T> create( Funnel<? super T> funnel, int expectedInsertions, double fpp) { return create(funnel, (long) expectedInsertions, fpp); }
Creates a {@link BloomFilter} with the expected number of insertions and expected false positive probability. <p>Note that overflowing a {@code BloomFilter} with significantly more elements than specified, will result in its saturation, and a sharp deterioration of its false positive probability. <p>The constructed {@code BloomFilter} will be serializable if the provided {@code Funnel<T>} is. <p>It is recommended that the funnel be implemented as a Java enum. This has the benefit of ensuring proper serialization and deserialization, which is important since {@link #equals} also relies on object identity of funnels. @param funnel the funnel of T's that the constructed {@code BloomFilter} will use @param expectedInsertions the number of expected insertions to the constructed {@code BloomFilter}; must be positive @param fpp the desired false positive probability (must be positive and less than 1.0) @return a {@code BloomFilter}
java
android/guava/src/com/google/common/hash/BloomFilter.java
396
[ "funnel", "expectedInsertions", "fpp" ]
true
1
6.48
google/guava
51,352
javadoc
false
equals
@Override public boolean equals(@Nullable Object other) { return (this == other || (other instanceof AnnotationClassFilter otherCf && this.annotationType.equals(otherCf.annotationType) && this.checkInherited == otherCf.checkInherited)); }
Create a new AnnotationClassFilter for the given annotation type. @param annotationType the annotation type to look for @param checkInherited whether to also check the superclasses and interfaces as well as meta-annotations for the annotation type (i.e. whether to use {@link AnnotatedElementUtils#hasAnnotation} semantics instead of standard Java {@link Class#isAnnotationPresent})
java
spring-aop/src/main/java/org/springframework/aop/support/annotation/AnnotationClassFilter.java
70
[ "other" ]
true
4
6.24
spring-projects/spring-framework
59,386
javadoc
false
countNumberOfUnboundAnnotationArguments
private int countNumberOfUnboundAnnotationArguments() { int count = 0; for (int i = 0; i < this.argumentTypes.length; i++) { if (isUnbound(i) && isSubtypeOf(Annotation.class, i)) { count++; } } return count; }
Return {@code true} if the given argument type is a subclass of the given supertype.
java
spring-aop/src/main/java/org/springframework/aop/aspectj/AspectJAdviceParameterNameDiscoverer.java
699
[]
true
4
7.04
spring-projects/spring-framework
59,386
javadoc
false
iterator
public static Iterator<?> iterator(final Object calendar, final int rangeStyle) { Objects.requireNonNull(calendar, "calendar"); if (calendar instanceof Date) { return iterator((Date) calendar, rangeStyle); } if (calendar instanceof Calendar) { return iterator((Calendar) calendar, rangeStyle); } throw new ClassCastException("Could not iterate based on " + calendar); }
Constructs an {@link Iterator} over each day in a date range defined by a focus date and range style. <p>For instance, passing Thursday, July 4, 2002 and a {@code RANGE_MONTH_SUNDAY} will return an {@link Iterator} that starts with Sunday, June 30, 2002 and ends with Saturday, August 3, 2002, returning a Calendar instance for each intermediate day.</p> @param calendar the date to work with, either {@link Date} or {@link Calendar}, not null. @param rangeStyle the style constant to use. Must be one of the range styles listed for the {@link #iterator(Calendar, int)} method. @return the date iterator, not null. @throws NullPointerException if the date is {@code null}. @throws ClassCastException if the object type is not a {@link Date} or {@link Calendar}.
java
src/main/java/org/apache/commons/lang3/time/DateUtils.java
1,087
[ "calendar", "rangeStyle" ]
true
3
8.08
apache/commons-lang
2,896
javadoc
false
setNameFormat
@CanIgnoreReturnValue public ThreadFactoryBuilder setNameFormat(String nameFormat) { String unused = format(nameFormat, 0); // fail fast if the format is bad or null this.nameFormat = nameFormat; return this; }
Sets the naming format to use when naming threads ({@link Thread#setName}) which are created with this ThreadFactory. <p><b>Java 21+ users:</b> use {@link Thread.Builder#name(String, long)} instead. Note that {@link #setNameFormat} accepts a thread name <i>format string</i> (e.g., {@code threadFactoryBuilder.setNameFormat("rpc-pool-%d")}), while {@code threadBuilder.name()} accepts a thread name <i>prefix</i> and initial counter value (e.g., {@code threadBuilder.name("rpc-pool-", 0)}. @param nameFormat a {@link String#format(String, Object...)}-compatible format String, to which a unique integer (0, 1, etc.) will be supplied as the single parameter. This integer will be unique to the built instance of the ThreadFactory and will be assigned sequentially. For example, {@code "rpc-pool-%d"} will generate thread names like {@code "rpc-pool-0"}, {@code "rpc-pool-1"}, {@code "rpc-pool-2"}, etc. @return this for the builder pattern
java
android/guava/src/com/google/common/util/concurrent/ThreadFactoryBuilder.java
88
[ "nameFormat" ]
ThreadFactoryBuilder
true
1
6.4
google/guava
51,352
javadoc
false
when
default ValueExtractor<T> when(Predicate<? super @Nullable T> predicate) { return (instance) -> test(extract(instance), predicate); }
Only extract when the given predicate matches. @param predicate the predicate to test @return a new {@link ValueExtractor}
java
core/spring-boot/src/main/java/org/springframework/boot/json/JsonWriter.java
712
[ "predicate" ]
true
1
6.48
spring-projects/spring-boot
79,428
javadoc
false
_fill_limit_area_1d
def _fill_limit_area_1d( mask: npt.NDArray[np.bool_], limit_area: Literal["outside", "inside"] ) -> None: """Prepare 1d mask for ffill/bfill with limit_area. Caller is responsible for checking at least one value of mask is False. When called, mask will no longer faithfully represent when the corresponding are NA or not. Parameters ---------- mask : np.ndarray[bool, ndim=1] Mask representing NA values when filling. limit_area : { "outside", "inside" } Whether to limit filling to outside or inside the outer most non-NA value. """ neg_mask = ~mask first = neg_mask.argmax() last = len(neg_mask) - neg_mask[::-1].argmax() - 1 if limit_area == "inside": mask[:first] = False mask[last + 1 :] = False elif limit_area == "outside": mask[first + 1 : last] = False
Prepare 1d mask for ffill/bfill with limit_area. Caller is responsible for checking at least one value of mask is False. When called, mask will no longer faithfully represent when the corresponding are NA or not. Parameters ---------- mask : np.ndarray[bool, ndim=1] Mask representing NA values when filling. limit_area : { "outside", "inside" } Whether to limit filling to outside or inside the outer most non-NA value.
python
pandas/core/missing.py
966
[ "mask", "limit_area" ]
None
true
3
6.72
pandas-dev/pandas
47,362
numpy
false
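The limit_area mask logic documented above can be traced end to end in a standalone sketch that reproduces the behavior instead of importing the private pandas helper:

```python
# Standalone sketch of the "inside" branch of _fill_limit_area_1d above;
# reproduces the documented logic without importing the private helper.
import numpy as np

mask = np.array([True, False, True, False, True])  # True marks an NA slot
neg_mask = ~mask
first = neg_mask.argmax()                           # first non-NA index: 1
last = len(neg_mask) - neg_mask[::-1].argmax() - 1  # last non-NA index: 3

inside = mask.copy()
inside[:first] = False       # never fill NAs before the first valid value
inside[last + 1:] = False    # ...or after the last valid value
print(inside)  # [False False  True False False]
```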
parseList
List<Object> parseList(@Nullable String json) throws JsonParseException;
Parse the specified JSON string into a List. @param json the JSON to parse @return the parsed JSON as a list @throws JsonParseException if the JSON cannot be parsed
java
core/spring-boot/src/main/java/org/springframework/boot/json/JsonParser.java
50
[ "json" ]
true
1
6.32
spring-projects/spring-boot
79,428
javadoc
false
listConsumerGroups
@Deprecated(since = "4.1", forRemoval = true) ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options);
List the consumer groups available in the cluster. @deprecated Since 4.1. Use {@link Admin#listGroups(ListGroupsOptions)} instead. @param options The options to use when listing the consumer groups. @return The ListConsumerGroupsResult.
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
887
[ "options" ]
ListConsumerGroupsResult
true
1
6
apache/kafka
31,560
javadoc
false
printStackTrace
@Override public void printStackTrace(PrintWriter pw) { if (ObjectUtils.isEmpty(this.messageExceptions)) { super.printStackTrace(pw); } else { pw.println(super.toString() + "; message exception details (" + this.messageExceptions.length + ") are:"); for (int i = 0; i < this.messageExceptions.length; i++) { Exception subEx = this.messageExceptions[i]; pw.println("Failed message " + (i + 1) + ":"); subEx.printStackTrace(pw); } } }
Return an array with thrown message exceptions. <p>Note that a general mail server connection failure will not result in failed messages being returned here: A message will only be contained here if actually sending it was attempted but failed. @return the array of thrown message exceptions, or an empty array if no failed messages
java
spring-context-support/src/main/java/org/springframework/mail/MailSendException.java
182
[ "pw" ]
void
true
3
6.88
spring-projects/spring-framework
59,386
javadoc
false
getLibFileFromReference
function getLibFileFromReference(ref: FileReference) { const libFileName = getLibFileNameFromLibReference(ref); const actualFileName = libFileName && resolvedLibReferences?.get(libFileName)?.actual; return actualFileName !== undefined ? getSourceFile(actualFileName) : undefined; }
@returns The line index marked as preceding the diagnostic, or -1 if none was.
typescript
src/compiler/program.ts
3,416
[ "ref" ]
false
3
6.24
microsoft/TypeScript
107,154
jsdoc
false
topicNameValues
public Map<String, KafkaFuture<Void>> topicNameValues() { return nameFutures; }
Use when {@link Admin#deleteTopics(TopicCollection, DeleteTopicsOptions)} used a TopicNameCollection @return a map from topic names to futures which can be used to check the status of individual deletions if the deleteTopics request used topic names. Otherwise return null.
java
clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java
65
[]
true
1
6
apache/kafka
31,560
javadoc
false
max
public abstract double max(double q, double compression, double n);
Computes the maximum relative size a cluster can have at quantile q. Note that exactly where within the range spanned by a cluster that q should be isn't clear. That means that this function usually has to be taken at multiple points and the smallest value used. <p> Note that this is the relative size of a cluster. To get the max number of samples in the cluster, multiply this value times the total number of samples in the digest. @param q The quantile @param compression The compression factor, typically delta in the literature @param n The number of samples seen so far in the digest @return The maximum number of samples that can be in the cluster
java
libs/tdigest/src/main/java/org/elasticsearch/tdigest/ScaleFunction.java
543
[ "q", "compression", "n" ]
true
1
6.64
elastic/elasticsearch
75,680
javadoc
false
nop
@SuppressWarnings("unchecked") static <T, E extends Throwable> FailableToBooleanFunction<T, E> nop() { return NOP; }
Gets the NOP singleton. @param <T> the type of the argument to the function @param <E> The kind of thrown exception or error. @return The NOP singleton.
java
src/main/java/org/apache/commons/lang3/function/FailableToBooleanFunction.java
41
[]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
getAllLoggers
private Map<String, LoggerConfig> getAllLoggers() { Map<String, LoggerConfig> loggers = new LinkedHashMap<>(); for (Logger logger : getLoggerContext().getLoggers()) { addLogger(loggers, logger.getName()); } getLoggerContext().getConfiguration().getLoggers().keySet().forEach((name) -> addLogger(loggers, name)); return loggers; }
Return the configuration location. The result may be: <ul> <li>{@code null}: if DefaultConfiguration is used (no explicit config loaded)</li> <li>A file path: if provided explicitly by the user</li> <li>A URI: if loaded from the classpath default or a custom location</li> </ul> @param configuration the source configuration @return the config location or {@code null}
java
core/spring-boot/src/main/java/org/springframework/boot/logging/log4j2/Log4J2LoggingSystem.java
404
[]
true
1
6.08
spring-projects/spring-boot
79,428
javadoc
false
_replace_coerce
def _replace_coerce( self, to_replace, value, mask: npt.NDArray[np.bool_], inplace: bool = True, regex: bool = False, ) -> list[Block]: """ Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. Returns ------- List[Block] """ if should_use_regex(regex, to_replace): return self._replace_regex( to_replace, value, inplace=inplace, mask=mask, ) else: if value is None: # gh-45601, gh-45836, gh-46634 if mask.any(): has_ref = self.refs.has_reference() nb = self.astype(np.dtype(object)) if not inplace: nb = nb.copy(deep=True) elif inplace and has_ref and nb.refs.has_reference(): # no copy in astype and we had refs before nb = nb.copy(deep=True) putmask_inplace(nb.values, mask, value) return [nb] return [self.copy(deep=False)] return self.replace( to_replace=to_replace, value=value, inplace=inplace, mask=mask, )
Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. Returns ------- List[Block]
python
pandas/core/internals/blocks.py
882
[ "self", "to_replace", "value", "mask", "inplace", "regex" ]
list[Block]
true
9
6.32
pandas-dev/pandas
47,362
numpy
false
_generate_kernel_call_helper
def _generate_kernel_call_helper( self, kernel_name: str, call_args, *, device=None, triton=True, arg_types=None, raw_keys=None, raw_args=None, triton_meta=None, graph_name="", original_fxnode_name=None, ): """ Generates kernel call code. triton: Defines whether the GPU backend uses Triton for codegen. Otherwise it uses the CUDA language for codegen. Only valid when cuda == True. """ assert arg_types is not None and len(call_args) == len(arg_types), ( "Mismatch call_args and arg_types in generate_kernel_call:\n" f"call_args: {call_args}\n" f"arg_types: {arg_types}" ) new_args = [] for idx, arg in enumerate(call_args): if isinstance(arg_types[idx], str) and "*" in arg_types[idx]: new_args.append(f"({arg_types[idx]})({arg}.data_ptr())") else: # arg is a scalar - ensure it's a string for C++ codegen # With Triton support, arg might be a SymPy expression or other type new_args.append(str(arg) if not isinstance(arg, str) else arg) # debug printer related logic for cpp kernel type. debug_printer_manager = V.graph.wrapper_code.debug_printer debug_printer_manager.set_printer_args( call_args, kernel_name, None, None, "cpp", ) with debug_printer_manager: self.writeline(self.wrap_kernel_call(kernel_name, new_args))
Generates kernel call code. triton: Defines whether the GPU backend uses Triton for codegen. Otherwise it uses the CUDA language for codegen. Only valid when cuda == True.
python
torch/_inductor/codegen/cpp_wrapper_cpu.py
135
[ "self", "kernel_name", "call_args", "device", "triton", "arg_types", "raw_keys", "raw_args", "triton_meta", "graph_name", "original_fxnode_name" ]
true
7
6.88
pytorch/pytorch
96,034
unknown
false
left
public static String left(final String str, final int len) { if (str == null) { return null; } if (len < 0) { return EMPTY; } if (str.length() <= len) { return str; } return str.substring(0, len); }
Gets the leftmost {@code len} characters of a String. <p> If {@code len} characters are not available, or the String is {@code null}, the String will be returned without an exception. An empty String is returned if len is negative. </p> <pre> StringUtils.left(null, *) = null StringUtils.left(*, -ve) = "" StringUtils.left("", *) = "" StringUtils.left("abc", 0) = "" StringUtils.left("abc", 2) = "ab" StringUtils.left("abc", 4) = "abc" </pre> @param str the String to get the leftmost characters from, may be null. @param len the length of the required String. @return the leftmost characters, {@code null} if null String input.
java
src/main/java/org/apache/commons/lang3/StringUtils.java
5,058
[ "str", "len" ]
String
true
4
8.08
apache/commons-lang
2,896
javadoc
false
getNodesForSpan
function getNodesForSpan(file: SourceFile, span: TextSpan): Node[] | undefined { // Span is the whole file if (textSpanContainsTextRange(span, file)) { return undefined; } const endToken = findTokenOnLeftOfPosition(file, textSpanEnd(span)) || file; const enclosingNode = findAncestor(endToken, node => textRangeContainsTextSpan(node, span))!; const nodes: Node[] = []; chooseOverlappingNodes(span, enclosingNode, nodes); if (file.end === span.start + span.length) { nodes.push(file.endOfFileToken); } // Span would include the whole file if (some(nodes, isSourceFile)) { return undefined; } return nodes; }
Gets nodes that overlap the given span to be partially checked. @returns an array of nodes that overlap the span and are source element nodes (c.f. {@link isSourceElement}), or undefined if a partial check would be the same as a whole file check.
typescript
src/services/services.ts
2,128
[ "file", "span" ]
true
5
7.04
microsoft/TypeScript
107,154
jsdoc
false
toBeStarted
private boolean toBeStarted(String beanName, Lifecycle bean) { Set<String> stoppedBeans = this.stoppedBeans; return (stoppedBeans != null ? stoppedBeans.contains(beanName) : (!(bean instanceof SmartLifecycle smartLifecycle) || smartLifecycle.isAutoStartup())); }
Start the specified bean as part of the given set of Lifecycle beans, making sure that any beans that it depends on are started first. @param lifecycleBeans a Map with bean name as key and Lifecycle instance as value @param beanName the name of the bean to start
java
spring-context/src/main/java/org/springframework/context/support/DefaultLifecycleProcessor.java
430
[ "beanName", "bean" ]
true
3
6.72
spring-projects/spring-framework
59,386
javadoc
false
_acquire_lock_with_timeout
def _acquire_lock_with_timeout( lock: Lock, timeout: float | None = None, ) -> Generator[None, None, None]: """Context manager that safely acquires a threading.Lock with timeout and automatically releases it. This function provides a safe way to acquire a lock with timeout support, ensuring the lock is always released even if an exception occurs during execution. Args: lock: The threading.Lock object to acquire timeout: Timeout in seconds. If None, uses _DEFAULT_TIMEOUT. - Use _BLOCKING (-1.0) for infinite wait - Use _NON_BLOCKING (0.0) for immediate return - Use positive value for finite timeout Yields: None: Yields control to the caller while holding the lock Raises: LockTimeoutError: If the lock cannot be acquired within the timeout period Example: with _acquire_lock_with_timeout(my_lock, timeout=30.0): # Critical section - lock is held perform_critical_operation() # Lock is automatically released here """ _unsafe_acquire_lock_with_timeout(lock, timeout=timeout) try: yield finally: lock.release()
Context manager that safely acquires a threading.Lock with timeout and automatically releases it. This function provides a safe way to acquire a lock with timeout support, ensuring the lock is always released even if an exception occurs during execution. Args: lock: The threading.Lock object to acquire timeout: Timeout in seconds. If None, uses _DEFAULT_TIMEOUT. - Use _BLOCKING (-1.0) for infinite wait - Use _NON_BLOCKING (0.0) for immediate return - Use positive value for finite timeout Yields: None: Yields control to the caller while holding the lock Raises: LockTimeoutError: If the lock cannot be acquired within the timeout period Example: with _acquire_lock_with_timeout(my_lock, timeout=30.0): # Critical section - lock is held perform_critical_operation() # Lock is automatically released here
python
torch/_inductor/runtime/caching/locks.py
48
[ "lock", "timeout" ]
Generator[None, None, None]
true
1
7.12
pytorch/pytorch
96,034
google
false
get_printoptions
def get_printoptions(): """ Return the current print options. Returns ------- print_opts : dict Dictionary of current print options with keys - precision : int - threshold : int - edgeitems : int - linewidth : int - suppress : bool - nanstr : str - infstr : str - sign : str - formatter : dict of callables - floatmode : str - legacy : str or False For a full description of these options, see `set_printoptions`. Notes ----- These print options apply only to NumPy ndarrays, not to scalars. **Concurrency note:** see :ref:`text_formatting_options` See Also -------- set_printoptions, printoptions Examples -------- >>> import numpy as np >>> np.get_printoptions() {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} >>> np.get_printoptions()['linewidth'] 75 >>> np.set_printoptions(linewidth=100) >>> np.get_printoptions()['linewidth'] 100 """ opts = format_options.get().copy() opts['legacy'] = { 113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1', 202: '2.2', sys.maxsize: False, }[opts['legacy']] return opts
Return the current print options. Returns ------- print_opts : dict Dictionary of current print options with keys - precision : int - threshold : int - edgeitems : int - linewidth : int - suppress : bool - nanstr : str - infstr : str - sign : str - formatter : dict of callables - floatmode : str - legacy : str or False For a full description of these options, see `set_printoptions`. Notes ----- These print options apply only to NumPy ndarrays, not to scalars. **Concurrency note:** see :ref:`text_formatting_options` See Also -------- set_printoptions, printoptions Examples -------- >>> import numpy as np >>> np.get_printoptions() {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} >>> np.get_printoptions()['linewidth'] 75 >>> np.set_printoptions(linewidth=100) >>> np.get_printoptions()['linewidth'] 100
python
numpy/_core/arrayprint.py
336
[]
false
1
6.32
numpy/numpy
31,054
unknown
false
getDeclarationDiagnosticsForFileNoCache
function getDeclarationDiagnosticsForFileNoCache(sourceFile: SourceFile, cancellationToken: CancellationToken | undefined): readonly DiagnosticWithLocation[] { return runWithCancellationToken(() => { const resolver = getTypeChecker().getEmitResolver(sourceFile, cancellationToken); // Don't actually write any files since we're just getting diagnostics. return ts_getDeclarationDiagnostics(getEmitHost(noop), resolver, sourceFile) || emptyArray; }); }
@returns The line index marked as preceding the diagnostic, or -1 if none was.
typescript
src/compiler/program.ts
3,250
[ "sourceFile", "cancellationToken" ]
true
2
7.2
microsoft/TypeScript
107,154
jsdoc
false
newCopyOnWriteArraySet
@J2ktIncompatible @GwtIncompatible // CopyOnWriteArraySet public static <E extends @Nullable Object> CopyOnWriteArraySet<E> newCopyOnWriteArraySet( Iterable<? extends E> elements) { // We copy elements to an ArrayList first, rather than incurring the // quadratic cost of adding them to the COWAS directly. Collection<? extends E> elementsCollection = (elements instanceof Collection) ? (Collection<? extends E>) elements : Lists.newArrayList(elements); return new CopyOnWriteArraySet<>(elementsCollection); }
Creates a {@code CopyOnWriteArraySet} instance containing the given elements. @param elements the elements that the set should contain, in order @return a new {@code CopyOnWriteArraySet} containing those elements @since 12.0
java
android/guava/src/com/google/common/collect/Sets.java
479
[ "elements" ]
true
2
7.44
google/guava
51,352
javadoc
false
getObjectName
@Override public ObjectName getObjectName(Object managedBean, @Nullable String beanKey) throws MalformedObjectNameException { ObjectName name = super.getObjectName(managedBean, beanKey); if (this.ensureUniqueRuntimeObjectNames) { return JmxUtils.appendIdentityToObjectName(name, managedBean); } if (parentContextContainsSameBean(this.applicationContext, beanKey)) { return appendToObjectName(name, "context", ObjectUtils.getIdentityHexString(this.applicationContext)); } return name; }
Set if unique runtime object names should be ensured. @param ensureUniqueRuntimeObjectNames {@code true} if unique names should be ensured.
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/jmx/ParentAwareNamingStrategy.java
68
[ "managedBean", "beanKey" ]
ObjectName
true
3
6.24
spring-projects/spring-boot
79,428
javadoc
false
cleanUpJavaDoc
private String cleanUpJavaDoc(String javadoc) { StringBuilder result = new StringBuilder(javadoc.length()); char lastChar = '.'; for (int i = 0; i < javadoc.length(); i++) { char ch = javadoc.charAt(i); boolean repeatedSpace = ch == ' ' && lastChar == ' '; if (ch != '\r' && ch != '\n' && !repeatedSpace) { result.append(ch); lastChar = ch; } } return result.toString().trim(); }
Return the {@link PrimitiveType} of the specified type or {@code null} if the type does not represent a valid wrapper type. @param typeMirror a type @return the primitive type or {@code null} if the type is not a wrapper type
java
configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/TypeUtils.java
270
[ "javadoc" ]
String
true
6
7.92
spring-projects/spring-boot
79,428
javadoc
false
_unbox
def _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray: """ Unbox either a scalar with _unbox_scalar or an instance of our own type. """ if lib.is_scalar(other): other = self._unbox_scalar(other) else: # same type as self self._check_compatible_with(other) other = other._ndarray return other
Unbox either a scalar with _unbox_scalar or an instance of our own type.
python
pandas/core/arrays/datetimelike.py
738
[ "self", "other" ]
np.int64 | np.datetime64 | np.timedelta64 | np.ndarray
true
3
6
pandas-dev/pandas
47,362
unknown
false
add
protected void add(final String str) { if (str == null) { return; } final int len = str.length(); int pos = 0; while (pos < len) { final int remainder = len - pos; if (remainder >= 4 && str.charAt(pos) == '^' && str.charAt(pos + 2) == '-') { // negated range set.add(CharRange.isNotIn(str.charAt(pos + 1), str.charAt(pos + 3))); pos += 4; } else if (remainder >= 3 && str.charAt(pos + 1) == '-') { // range set.add(CharRange.isIn(str.charAt(pos), str.charAt(pos + 2))); pos += 3; } else if (remainder >= 2 && str.charAt(pos) == '^') { // negated char set.add(CharRange.isNot(str.charAt(pos + 1))); pos += 2; } else { // char set.add(CharRange.is(str.charAt(pos))); pos += 1; } } }
Add a set definition string to the {@link CharSet}. @param str set definition string
java
src/main/java/org/apache/commons/lang3/CharSet.java
184
[ "str" ]
void
true
10
6.88
apache/commons-lang
2,896
javadoc
false
polynomial_kernel
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1): """ Compute the polynomial kernel between X and Y. .. code-block:: text K(X, Y) = (gamma <X, Y> + coef0) ^ degree Read more in the :ref:`User Guide <polynomial_kernel>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples_X, n_features) A feature array. Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. degree : float, default=3 Kernel degree. gamma : float, default=None Coefficient of the vector inner product. If None, defaults to 1.0 / n_features. coef0 : float, default=1 Constant offset added to scaled inner product. Returns ------- kernel : ndarray of shape (n_samples_X, n_samples_Y) The polynomial kernel. Examples -------- >>> from sklearn.metrics.pairwise import polynomial_kernel >>> X = [[0, 0, 0], [1, 1, 1]] >>> Y = [[1, 0, 0], [1, 1, 0]] >>> polynomial_kernel(X, Y, degree=2) array([[1. , 1. ], [1.77, 2.77]]) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = safe_sparse_dot(X, Y.T, dense_output=True) K *= gamma K += coef0 K **= degree return K
Compute the polynomial kernel between X and Y. .. code-block:: text K(X, Y) = (gamma <X, Y> + coef0) ^ degree Read more in the :ref:`User Guide <polynomial_kernel>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples_X, n_features) A feature array. Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), default=None An optional second feature array. If `None`, uses `Y=X`. degree : float, default=3 Kernel degree. gamma : float, default=None Coefficient of the vector inner product. If None, defaults to 1.0 / n_features. coef0 : float, default=1 Constant offset added to scaled inner product. Returns ------- kernel : ndarray of shape (n_samples_X, n_samples_Y) The polynomial kernel. Examples -------- >>> from sklearn.metrics.pairwise import polynomial_kernel >>> X = [[0, 0, 0], [1, 1, 1]] >>> Y = [[1, 0, 0], [1, 1, 0]] >>> polynomial_kernel(X, Y, degree=2) array([[1. , 1. ], [1.77, 2.77]])
python
sklearn/metrics/pairwise.py
1,444
[ "X", "Y", "degree", "gamma", "coef0" ]
false
2
7.52
scikit-learn/scikit-learn
64,340
numpy
false
roots
def roots(p): """ Return the roots of a polynomial with coefficients given in p. .. note:: This forms part of the old polynomial API. Since version 1.4, the new polynomial API defined in `numpy.polynomial` is preferred. A summary of the differences can be found in the :doc:`transition guide </reference/routines.polynomials>`. The values in the rank-1 array `p` are coefficients of a polynomial. If the length of `p` is n+1 then the polynomial is described by:: p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] Parameters ---------- p : array_like Rank-1 array of polynomial coefficients. Returns ------- out : ndarray An array containing the roots of the polynomial. Raises ------ ValueError When `p` cannot be converted to a rank-1 array. See also -------- poly : Find the coefficients of a polynomial with a given sequence of roots. polyval : Compute polynomial values. polyfit : Least squares polynomial fit. poly1d : A one-dimensional polynomial class. Notes ----- The algorithm relies on computing the eigenvalues of the companion matrix [1]_. References ---------- .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: Cambridge University Press, 1999, pp. 146-7. Examples -------- >>> import numpy as np >>> coeff = [3.2, 2, 1] >>> np.roots(coeff) array([-0.3125+0.46351241j, -0.3125-0.46351241j]) """ # If input is scalar, this makes it an array p = atleast_1d(p) if p.ndim != 1: raise ValueError("Input must be a rank-1 array.") # find non-zero array entries non_zero = NX.nonzero(NX.ravel(p))[0] # Return an empty array if polynomial is all zeros if len(non_zero) == 0: return NX.array([]) # find the number of trailing zeros -- this is the number of roots at 0. trailing_zeros = len(p) - non_zero[-1] - 1 # strip leading and trailing zeros p = p[int(non_zero[0]):int(non_zero[-1]) + 1] # casting: if incoming array isn't floating point, make it floating point. if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): p = p.astype(float) N = len(p) if N > 1: # build companion matrix and find its eigenvalues (the roots) A = diag(NX.ones((N - 2,), p.dtype), -1) A[0, :] = -p[1:] / p[0] roots = eigvals(A) else: roots = NX.array([]) # tack any zeros onto the back of the array roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) return roots
Return the roots of a polynomial with coefficients given in p. .. note:: This forms part of the old polynomial API. Since version 1.4, the new polynomial API defined in `numpy.polynomial` is preferred. A summary of the differences can be found in the :doc:`transition guide </reference/routines.polynomials>`. The values in the rank-1 array `p` are coefficients of a polynomial. If the length of `p` is n+1 then the polynomial is described by:: p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] Parameters ---------- p : array_like Rank-1 array of polynomial coefficients. Returns ------- out : ndarray An array containing the roots of the polynomial. Raises ------ ValueError When `p` cannot be converted to a rank-1 array. See also -------- poly : Find the coefficients of a polynomial with a given sequence of roots. polyval : Compute polynomial values. polyfit : Least squares polynomial fit. poly1d : A one-dimensional polynomial class. Notes ----- The algorithm relies on computing the eigenvalues of the companion matrix [1]_. References ---------- .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: Cambridge University Press, 1999, pp. 146-7. Examples -------- >>> import numpy as np >>> coeff = [3.2, 2, 1] >>> np.roots(coeff) array([-0.3125+0.46351241j, -0.3125-0.46351241j])
python
numpy/lib/_polynomial_impl.py
171
[ "p" ]
false
6
7.6
numpy/numpy
31,054
numpy
false
removeAllOccurrences
public static int[] removeAllOccurrences(final int[] array, final int element) { return (int[]) removeAt(array, indexesOf(array, element)); }
Removes the occurrences of the specified element from the specified int array. <p> All subsequent elements are shifted to the left (subtracts one from their indices). If the array doesn't contain such an element, no elements are removed from the array. {@code null} will be returned if the input array is {@code null}. </p> @param array the input array, will not be modified, and may be {@code null}. @param element the element to remove. @return A new array containing the existing elements except the occurrences of the specified element. @since 3.10
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
5,521
[ "array", "element" ]
true
1
6.96
apache/commons-lang
2,896
javadoc
false
erroneousCompletionException
private UnsupportedOperationException erroneousCompletionException() { return new UnsupportedOperationException("User code should not complete futures returned from Kafka clients"); }
Completes this future exceptionally. For internal use by the Kafka clients, not by user code. @param throwable the exception. @return {@code true} if this invocation caused this CompletableFuture to transition to a completed state, else {@code false}
java
clients/src/main/java/org/apache/kafka/common/internals/KafkaCompletableFuture.java
92
[]
UnsupportedOperationException
true
1
6.64
apache/kafka
31,560
javadoc
false
enqueueCopyImage
inline cl_int enqueueCopyImage( const Image& src, const Image& dst, const size_t<3>& src_origin, const size_t<3>& dst_origin, const size_t<3>& region, const VECTOR_CLASS<Event>* events = NULL, Event* event = NULL) { cl_int error; CommandQueue queue = CommandQueue::getDefault(&error); if (error != CL_SUCCESS) { return error; } return queue.enqueueCopyImage( src, dst, src_origin, dst_origin, region, events, event); }
Blocking copy operation between iterators and a buffer.
cpp
3rdparty/include/opencl/1.2/CL/cl.hpp
6,456
[]
true
2
6.4
opencv/opencv
85,374
doxygen
false
param_parse
def param_parse(d, params): """Recursively parse array dimensions. Parses the declaration of an array variable or parameter `dimension` keyword, and is called recursively if the dimension for this array is a previously defined parameter (found in `params`). Parameters ---------- d : str Fortran expression describing the dimension of an array. params : dict Previously parsed parameters declared in the Fortran source file. Returns ------- out : str Parsed dimension expression. Examples -------- * If the line being analyzed is `integer, parameter, dimension(2) :: pa = (/ 3, 5 /)` then `d = 2` and we return immediately, with >>> d = '2' >>> param_parse(d, params) 2 * If the line being analyzed is `integer, parameter, dimension(pa) :: pb = (/1, 2, 3/)` then `d = 'pa'`; since `pa` is a previously parsed parameter, and `pa = 3`, we call `param_parse` recursively, to obtain >>> d = 'pa' >>> params = {'pa': 3} >>> param_parse(d, params) 3 * If the line being analyzed is `integer, parameter, dimension(pa(1)) :: pb = (/1, 2, 3/)` then `d = 'pa(1)'`; since `pa` is a previously parsed parameter, and `pa(1) = 3`, we call `param_parse` recursively, to obtain >>> d = 'pa(1)' >>> params = dict(pa={1: 3, 2: 5}) >>> param_parse(d, params) 3 """ if "(" in d: # this dimension expression is an array dname = d[:d.find("(")] ddims = d[d.find("(") + 1:d.rfind(")")] # this dimension expression is also a parameter; # parse it recursively index = int(param_parse(ddims, params)) return str(params[dname][index]) elif d in params: return str(params[d]) else: for p in params: re_1 = re.compile( r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I ) m = re_1.match(d) while m: d = m.group('before') + \ str(params[p]) + m.group('after') m = re_1.match(d) return d
Recursively parse array dimensions. Parses the declaration of an array variable or parameter `dimension` keyword, and is called recursively if the dimension for this array is a previously defined parameter (found in `params`). Parameters ---------- d : str Fortran expression describing the dimension of an array. params : dict Previously parsed parameters declared in the Fortran source file. Returns ------- out : str Parsed dimension expression. Examples -------- * If the line being analyzed is `integer, parameter, dimension(2) :: pa = (/ 3, 5 /)` then `d = 2` and we return immediately, with >>> d = '2' >>> param_parse(d, params) 2 * If the line being analyzed is `integer, parameter, dimension(pa) :: pb = (/1, 2, 3/)` then `d = 'pa'`; since `pa` is a previously parsed parameter, and `pa = 3`, we call `param_parse` recursively, to obtain >>> d = 'pa' >>> params = {'pa': 3} >>> param_parse(d, params) 3 * If the line being analyzed is `integer, parameter, dimension(pa(1)) :: pb = (/1, 2, 3/)` then `d = 'pa(1)'`; since `pa` is a previously parsed parameter, and `pa(1) = 3`, we call `param_parse` recursively, to obtain >>> d = 'pa(1)' >>> params = dict(pa={1: 3, 2: 5}) >>> param_parse(d, params) 3
python
numpy/f2py/crackfortran.py
3,026
[ "d", "params" ]
false
6
7.44
numpy/numpy
31,054
numpy
false
getNanoTime
public long getNanoTime() { switch (runningState) { case STOPPED: case SUSPENDED: return stopTimeNanos - startTimeNanos; case UNSTARTED: return 0; case RUNNING: return System.nanoTime() - startTimeNanos; default: break; } throw new IllegalStateException("Illegal running state has occurred."); }
Gets the <em>elapsed</em> time in nanoseconds. <p> This is either the time between the start and the moment this method is called, or the amount of time between start and stop. </p> @return the <em>elapsed</em> time in nanoseconds. @see System#nanoTime() @since 3.0
java
src/main/java/org/apache/commons/lang3/time/StopWatch.java
395
[]
true
1
6.88
apache/commons-lang
2,896
javadoc
false
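The record above documents StopWatch.getNanoTime() but its javadoc carries no example; a minimal usage sketch follows, assuming commons-lang3 3.5+ for the StopWatch.createStarted() factory.

```java
import org.apache.commons.lang3.time.StopWatch;

public class StopWatchDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = StopWatch.createStarted(); // RUNNING: elapsed time keeps growing
        Thread.sleep(50);
        watch.stop(); // STOPPED: getNanoTime() is now fixed at stop - start
        System.out.println(watch.getNanoTime() + " ns elapsed");
    }
}
```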
removeAllOccurences
@Deprecated public static boolean[] removeAllOccurences(final boolean[] array, final boolean element) { return (boolean[]) removeAt(array, indexesOf(array, element)); }
Removes the occurrences of the specified element from the specified boolean array. <p> All subsequent elements are shifted to the left (subtracts one from their indices). If the array doesn't contain such an element, no elements are removed from the array. {@code null} will be returned if the input array is {@code null}. </p> @param array the input array, will not be modified, and may be {@code null}. @param element the element to remove. @return A new array containing the existing elements except the occurrences of the specified element. @since 3.5 @deprecated Use {@link #removeAllOccurrences(boolean[], boolean)}.
java
src/main/java/org/apache/commons/lang3/ArrayUtils.java
5,265
[ "array", "element" ]
true
1
6.64
apache/commons-lang
2,896
javadoc
false
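Since the variant above is deprecated in favor of removeAllOccurrences, a short sketch of the replacement spelling may help; it assumes commons-lang3 3.10+, where that spelling was introduced.

```java
import java.util.Arrays;
import org.apache.commons.lang3.ArrayUtils;

public class RemoveAllOccurrencesDemo {
    public static void main(String[] args) {
        boolean[] input = {true, false, true, true, false};
        // Non-deprecated spelling; all matching elements are removed, order preserved.
        boolean[] result = ArrayUtils.removeAllOccurrences(input, true);
        System.out.println(Arrays.toString(result)); // [false, false]
    }
}
```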
all
public KafkaFuture<Void> all() { final KafkaFutureImpl<Void> result = new KafkaFutureImpl<>(); this.future.whenComplete((topicResults, throwable) -> { if (throwable != null) { result.completeExceptionally(throwable); } else { for (String topic : topics) { if (maybeCompleteExceptionally(topicResults, topic, result)) { return; } } result.complete(null); } }); return result; }
Return a future which succeeds only if all the deletions succeed. If not, the first topic error shall be returned.
java
clients/src/main/java/org/apache/kafka/clients/admin/DeleteShareGroupOffsetsResult.java
43
[]
true
3
7.04
apache/kafka
31,560
javadoc
false
maybeSetMetadataError
private void maybeSetMetadataError(Cluster cluster) { clearRecoverableErrors(); checkInvalidTopics(cluster); checkUnauthorizedTopics(cluster); }
Clears any previously recorded recoverable errors and then checks the given cluster for invalid and unauthorized topics, recording the corresponding metadata errors if any are found. @param cluster the cluster metadata to check
java
clients/src/main/java/org/apache/kafka/clients/Metadata.java
463
[ "cluster" ]
void
true
1
6.56
apache/kafka
31,560
javadoc
false
asof
def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(["2013-12-31", "2014-01-02", "2014-01-03"]) >>> idx.asof("2014-01-01") '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof("2014-01-02") '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof("1999-01-02") nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(["2013-12-31", "2015-01-02", "2014-01-03"]) >>> idx_not_sorted.asof("2013-12-31") Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError) as err: # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") from err loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): return self[loc][-1] return self[loc]
Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(["2013-12-31", "2014-01-02", "2014-01-03"]) >>> idx.asof("2014-01-01") '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof("2014-01-02") '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof("1999-01-02") nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(["2013-12-31", "2015-01-02", "2014-01-03"]) >>> idx_not_sorted.asof("2013-12-31") Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing
python
pandas/core/indexes/base.py
5,640
[ "self", "label" ]
false
6
7.76
pandas-dev/pandas
47,362
numpy
false
toString
public static String toString(final Type type) { Objects.requireNonNull(type, "type"); if (type instanceof Class<?>) { return classToString((Class<?>) type); } if (type instanceof ParameterizedType) { return parameterizedTypeToString((ParameterizedType) type); } if (type instanceof WildcardType) { return wildcardTypeToString((WildcardType) type); } if (type instanceof TypeVariable<?>) { return typeVariableToString((TypeVariable<?>) type); } if (type instanceof GenericArrayType) { return genericArrayTypeToString((GenericArrayType) type); } throw new IllegalArgumentException(ObjectUtils.identityToString(type)); }
Formats a given type as a Java-esque String. @param type the type to create a String representation for, not {@code null}. @return String. @throws NullPointerException if {@code type} is {@code null}. @since 3.2
java
src/main/java/org/apache/commons/lang3/reflect/TypeUtils.java
1,537
[ "type" ]
String
true
6
7.92
apache/commons-lang
2,896
javadoc
false
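To illustrate the "Java-esque" formatting described above, here is a small sketch; TypeUtils.parameterize is assumed as the companion factory (it ships in the same class since 3.2).

```java
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.reflect.TypeUtils;

public class TypeToStringDemo {
    public static void main(String[] args) {
        // Build a ParameterizedType and render it as a readable string.
        System.out.println(TypeUtils.toString(
                TypeUtils.parameterize(Map.class, String.class,
                        TypeUtils.parameterize(List.class, Integer.class))));
        // prints something like:
        // java.util.Map<java.lang.String, java.util.List<java.lang.Integer>>
    }
}
```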
splitWorker
private static String[] splitWorker(final String str, final char separatorChar, final boolean preserveAllTokens) { // Performance tuned for 2.0 (JDK1.4) if (str == null) { return null; } final int len = str.length(); if (len == 0) { return ArrayUtils.EMPTY_STRING_ARRAY; } final List<String> list = new ArrayList<>(); int i = 0; int start = 0; boolean match = false; boolean lastMatch = false; while (i < len) { if (str.charAt(i) == separatorChar) { if (match || preserveAllTokens) { list.add(str.substring(start, i)); match = false; lastMatch = true; } start = ++i; continue; } lastMatch = false; match = true; i++; } if (match || preserveAllTokens && lastMatch) { list.add(str.substring(start, i)); } return list.toArray(ArrayUtils.EMPTY_STRING_ARRAY); }
Performs the logic for the {@code split} and {@code splitPreserveAllTokens} methods that do not return a maximum array length. @param str the String to parse, may be {@code null}. @param separatorChar the separator character. @param preserveAllTokens if {@code true}, adjacent separators are treated as empty token separators; if {@code false}, adjacent separators are treated as one separator. @return an array of parsed Strings, {@code null} if null String input.
java
src/main/java/org/apache/commons/lang3/StringUtils.java
7,562
[ "str", "separatorChar", "preserveAllTokens" ]
true
10
8.08
apache/commons-lang
2,896
javadoc
false
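splitWorker is private; the effect of its preserveAllTokens flag is easiest to see through the public entry points, sketched below.

```java
import java.util.Arrays;
import org.apache.commons.lang3.StringUtils;

public class SplitDemo {
    public static void main(String[] args) {
        // preserveAllTokens=false: adjacent separators are treated as one
        System.out.println(Arrays.toString(StringUtils.split("a..b.c", '.')));
        // [a, b, c]
        // preserveAllTokens=true: adjacent separators yield empty tokens
        System.out.println(Arrays.toString(StringUtils.splitPreserveAllTokens("a..b.c", '.')));
        // [a, , b, c]
    }
}
```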
allByBrokerId
public KafkaFuture<Map<Integer, Collection<TransactionListing>>> allByBrokerId() { KafkaFutureImpl<Map<Integer, Collection<TransactionListing>>> allFuture = new KafkaFutureImpl<>(); Map<Integer, Collection<TransactionListing>> allListingsMap = new HashMap<>(); future.whenComplete((map, topLevelException) -> { if (topLevelException != null) { allFuture.completeExceptionally(topLevelException); return; } Set<Integer> remainingResponses = new HashSet<>(map.keySet()); map.forEach((brokerId, future) -> future.whenComplete((listings, brokerException) -> { if (brokerException != null) { allFuture.completeExceptionally(brokerException); } else if (!allFuture.isDone()) { allListingsMap.put(brokerId, listings); remainingResponses.remove(brokerId); if (remainingResponses.isEmpty()) { allFuture.complete(allListingsMap); } } }) ); }); return allFuture; }
Get all transaction listings in a map which is keyed by the ID of respective broker that is currently managing them. If any of the underlying requests fail, then the future returned from this method will also fail with the first encountered error. @return A future containing a map from the broker ID to the transactions hosted by that broker respectively. This future completes when all transaction listings are available and fails after any non-retriable error.
java
clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsResult.java
92
[]
true
5
7.92
apache/kafka
31,560
javadoc
false
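A hedged consumption sketch for the future returned above; the Admin client setup, the bootstrap address, and the listTransactions() entry point are assumptions drawn from the same admin API rather than from this record.

```java
import java.util.Collection;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListTransactionsResult;
import org.apache.kafka.clients.admin.TransactionListing;

public class ListTransactionsDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // hypothetical broker address
        try (Admin admin = Admin.create(props)) {
            ListTransactionsResult result = admin.listTransactions();
            // Blocks until every per-broker listing is available, or fails on the first error.
            Map<Integer, Collection<TransactionListing>> byBroker = result.allByBrokerId().get();
            byBroker.forEach((broker, listings) ->
                    System.out.println("broker " + broker + ": " + listings.size() + " transactions"));
        }
    }
}
```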
ediff1d
def ediff1d(arr, to_end=None, to_begin=None): """ Compute the differences between consecutive elements of an array. This function is the equivalent of `numpy.ediff1d` that takes masked values into account, see `numpy.ediff1d` for details. See Also -------- numpy.ediff1d : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> arr = np.ma.array([1, 2, 4, 7, 0]) >>> np.ma.ediff1d(arr) masked_array(data=[ 1, 2, 3, -7], mask=False, fill_value=999999) """ arr = ma.asanyarray(arr).flat ed = arr[1:] - arr[:-1] arrays = [ed] # if to_begin is not None: arrays.insert(0, to_begin) if to_end is not None: arrays.append(to_end) # if len(arrays) != 1: # We'll save ourselves a copy of a potentially large array in the common # case where neither to_begin or to_end was given. ed = hstack(arrays) # return ed
Compute the differences between consecutive elements of an array. This function is the equivalent of `numpy.ediff1d` that takes masked values into account, see `numpy.ediff1d` for details. See Also -------- numpy.ediff1d : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> arr = np.ma.array([1, 2, 4, 7, 0]) >>> np.ma.ediff1d(arr) masked_array(data=[ 1, 2, 3, -7], mask=False, fill_value=999999)
python
numpy/ma/extras.py
1,229
[ "arr", "to_end", "to_begin" ]
false
4
6.16
numpy/numpy
31,054
unknown
false
__call__
def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : ndarray of shape (n_samples_Y, n_features), default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when `eval_gradient` is True. """ pairwise_kernels_kwargs = self.pairwise_kernels_kwargs if self.pairwise_kernels_kwargs is None: pairwise_kernels_kwargs = {} X = np.atleast_2d(X) K = pairwise_kernels( X, Y, metric=self.metric, gamma=self.gamma, filter_params=True, **pairwise_kernels_kwargs, ) if eval_gradient: if self.hyperparameter_gamma.fixed: return K, np.empty((X.shape[0], X.shape[0], 0)) else: # approximate gradient numerically def f(gamma): # helper function return pairwise_kernels( X, Y, metric=self.metric, gamma=np.exp(gamma), filter_params=True, **pairwise_kernels_kwargs, ) return K, _approx_fprime(self.theta, f, 1e-10) else: return K
Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : ndarray of shape (n_samples_Y, n_features), default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Only supported when Y is None. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when `eval_gradient` is True.
python
sklearn/gaussian_process/kernels.py
2,321
[ "self", "X", "Y", "eval_gradient" ]
false
6
6
scikit-learn/scikit-learn
64,340
numpy
false
password
@Nullable String password();
The password used when {@link KeyStore#setKeyEntry(String, java.security.Key, char[], java.security.cert.Certificate[]) setting key entries} in the {@link KeyStore}. @return the password
java
core/spring-boot/src/main/java/org/springframework/boot/ssl/pem/PemSslStore.java
59
[]
String
true
1
6
spring-projects/spring-boot
79,428
javadoc
false
forString
@CanIgnoreReturnValue // TODO(b/219820829): consider removing public static InetAddress forString(String ipString) { Scope scope = new Scope(); byte[] addr = ipStringToBytes(ipString, scope); // The argument was malformed, i.e. not an IP string literal. if (addr == null) { throw formatIllegalArgumentException("'%s' is not an IP string literal.", ipString); } return bytesToInetAddress(addr, scope.scope); }
Returns the {@link InetAddress} having the given string representation. <p>This deliberately avoids all nameservice lookups (e.g. no DNS). <p>This method accepts non-ASCII digits, for example {@code "１９２.１６８.０.１"} (those are fullwidth characters). That is consistent with {@link InetAddress}, but not with various RFCs. If you want to accept ASCII digits only, you can use something like {@code CharMatcher.ascii().matchesAllOf(ipString)}. <p>The scope ID is validated against the interfaces on the machine, which requires permissions under Android. <p><b>Android users on API >= 29:</b> Prefer {@code InetAddresses.parseNumericAddress}. @param ipString {@code String} containing an IPv4 or IPv6 string literal, e.g. {@code "192.168.0.1"} or {@code "2001:db8::1"} or with a scope ID, e.g. {@code "2001:db8::1%eth0"} @return {@link InetAddress} representing the argument @throws IllegalArgumentException if the argument is not a valid IP string literal or if the address has a scope ID that fails validation against the interfaces on the machine (as required by Java's {@link InetAddress})
java
android/guava/src/com/google/common/net/InetAddresses.java
156
[ "ipString" ]
InetAddress
true
2
7.76
google/guava
51,352
javadoc
false
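A short runnable sketch of the lookup-free parsing described above:

```java
import java.net.InetAddress;
import com.google.common.net.InetAddresses;

public class ForStringDemo {
    public static void main(String[] args) {
        // Neither call performs a DNS lookup; malformed input throws IllegalArgumentException.
        InetAddress v4 = InetAddresses.forString("192.168.0.1");
        InetAddress v6 = InetAddresses.forString("2001:db8::1");
        System.out.println(v4.getHostAddress()); // 192.168.0.1
        System.out.println(v6.getHostAddress()); // 2001:db8::1
    }
}
```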
to_pickle
def to_pickle( obj: Any, filepath_or_buffer: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions | None = None, ) -> None: """ Pickle (serialize) object to file. Parameters ---------- obj : any object Any python object. filepath_or_buffer : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. Also accepts URL. URL has to be of S3 or GCS. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible values for this parameter depend on the version of Python. For Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. For Python >= 3.4, 4 is a valid value. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. {storage_options} .. [1] https://docs.python.org/3/library/pickle.html See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame( ... {{"foo": range(5), "bar": range(5, 10)}} ... ) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ if protocol < 0: protocol = pickle.HIGHEST_PROTOCOL with get_handle( filepath_or_buffer, "wb", compression=compression, is_text=False, storage_options=storage_options, ) as handles: # letting pickle write directly to the buffer is more memory-efficient pickle.dump(obj, handles.handle, protocol=protocol)
Pickle (serialize) object to file. Parameters ---------- obj : any object Any python object. filepath_or_buffer : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. Also accepts URL. URL has to be of S3 or GCS. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible values for this parameter depend on the version of Python. For Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. For Python >= 3.4, 4 is a valid value. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. {storage_options} .. [1] https://docs.python.org/3/library/pickle.html See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame( ... {{"foo": range(5), "bar": range(5, 10)}} ... ) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9
python
pandas/io/pickle.py
42
[ "obj", "filepath_or_buffer", "compression", "protocol", "storage_options" ]
None
true
2
8.4
pandas-dev/pandas
47,362
numpy
false
setupNetworkInspection
function setupNetworkInspection() { if (internalBinding('config').hasInspector && getOptionValue('--experimental-network-inspection')) { const { enable, disable, } = require('internal/inspector_network_tracking'); internalBinding('inspector').setupNetworkTracking(enable, disable); } }
Sets up inspector network tracking when the inspector is available and the `--experimental-network-inspection` option is enabled, registering the enable and disable callbacks from the network-tracking module with the inspector binding.
javascript
lib/internal/process/pre_execution.js
505
[]
false
3
6.8
nodejs/node
114,839
jsdoc
false
nunique
def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series: """ Count number of distinct elements in specified axis. Return Series with number of distinct elements. Can ignore NaN values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. dropna : bool, default True Don't include NaN in the counts. Returns ------- Series Series with counts of unique values per row or column, depending on `axis`. See Also -------- Series.nunique: Method nunique for Series. DataFrame.count: Count non-NA cells for each column or row. Examples -------- >>> df = pd.DataFrame({"A": [4, 5, 6], "B": [4, 1, 1]}) >>> df.nunique() A 3 B 2 dtype: int64 >>> df.nunique(axis=1) 0 1 1 2 2 2 dtype: int64 """ return self.apply(Series.nunique, axis=axis, dropna=dropna)
Count number of distinct elements in specified axis. Return Series with number of distinct elements. Can ignore NaN values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. dropna : bool, default True Don't include NaN in the counts. Returns ------- Series Series with counts of unique values per row or column, depending on `axis`. See Also -------- Series.nunique: Method nunique for Series. DataFrame.count: Count non-NA cells for each column or row. Examples -------- >>> df = pd.DataFrame({"A": [4, 5, 6], "B": [4, 1, 1]}) >>> df.nunique() A 3 B 2 dtype: int64 >>> df.nunique(axis=1) 0 1 1 2 2 2 dtype: int64
python
pandas/core/frame.py
14,055
[ "self", "axis", "dropna" ]
Series
true
1
7.28
pandas-dev/pandas
47,362
numpy
false
_from_inferred_categories
def _from_inferred_categories( cls, inferred_categories, inferred_codes, dtype, true_values=None ) -> Self: """ Construct a Categorical from inferred values. For inferred categories (`dtype` is None) the categories are sorted. For explicit `dtype`, the `inferred_categories` are cast to the appropriate type. Parameters ---------- inferred_categories : Index inferred_codes : Index dtype : CategoricalDtype or 'category' true_values : list, optional If none are provided, the default ones are "True", "TRUE", and "true." Returns ------- Categorical """ from pandas import ( Index, to_datetime, to_numeric, to_timedelta, ) cats = Index(inferred_categories) known_categories = ( isinstance(dtype, CategoricalDtype) and dtype.categories is not None ) if known_categories: # Convert to a specialized type with `dtype` if specified. if is_any_real_numeric_dtype(dtype.categories.dtype): cats = to_numeric(inferred_categories, errors="coerce") elif lib.is_np_dtype(dtype.categories.dtype, "M"): cats = to_datetime(inferred_categories, errors="coerce") elif lib.is_np_dtype(dtype.categories.dtype, "m"): cats = to_timedelta(inferred_categories, errors="coerce") elif is_bool_dtype(dtype.categories.dtype): if true_values is None: true_values = ["True", "TRUE", "true"] # error: Incompatible types in assignment (expression has type # "ndarray", variable has type "Index") cats = cats.isin(true_values) # type: ignore[assignment] if known_categories: # Recode from observation order to dtype.categories order. categories = dtype.categories codes = recode_for_categories( inferred_codes, cats, categories, copy=False, warn=True ) elif not cats.is_monotonic_increasing: # Sort categories and recode for unknown categories. unsorted = cats.copy() categories = cats.sort_values() codes = recode_for_categories( inferred_codes, unsorted, categories, copy=False, warn=True ) dtype = CategoricalDtype(categories, ordered=False) else: dtype = CategoricalDtype(cats, ordered=False) codes = inferred_codes return cls._simple_new(codes, dtype=dtype)
Construct a Categorical from inferred values. For inferred categories (`dtype` is None) the categories are sorted. For explicit `dtype`, the `inferred_categories` are cast to the appropriate type. Parameters ---------- inferred_categories : Index inferred_codes : Index dtype : CategoricalDtype or 'category' true_values : list, optional If none are provided, the default ones are "True", "TRUE", and "true." Returns ------- Categorical
python
pandas/core/arrays/categorical.py
643
[ "cls", "inferred_categories", "inferred_codes", "dtype", "true_values" ]
Self
true
11
6.32
pandas-dev/pandas
47,362
numpy
false
getErrorReport
@Nullable String getErrorReport() { Map<String, List<PropertyMigration>> content = getContent(LegacyProperties::getUnsupported); if (content.isEmpty()) { return null; } StringBuilder report = new StringBuilder(); report.append(String .format("%nThe use of configuration keys that are no longer supported was found in the environment:%n%n")); append(report, content); report.append(String.format("%n")); report.append("Please refer to the release notes or reference guide for potential alternatives."); report.append(String.format("%n")); return report.toString(); }
Return a report for all the properties that are no longer supported. If no such properties were found, return {@code null}. @return a report with the configurations keys that are no longer supported
java
core/spring-boot-properties-migrator/src/main/java/org/springframework/boot/context/properties/migrator/PropertiesMigrationReport.java
65
[]
String
true
2
8.24
spring-projects/spring-boot
79,428
javadoc
false
generateCodeForConstructor
private CodeBlock generateCodeForConstructor(RegisteredBean registeredBean, Constructor<?> constructor) { ConstructorDescriptor descriptor = new ConstructorDescriptor( registeredBean.getBeanName(), constructor, registeredBean.getBeanClass()); Class<?> publicType = descriptor.publicType(); if (KOTLIN_REFLECT_PRESENT && KotlinDetector.isKotlinType(publicType) && KotlinDelegate.hasConstructorWithOptionalParameter(publicType)) { return generateCodeForInaccessibleConstructor(descriptor, hints -> hints.registerType(publicType, MemberCategory.INVOKE_DECLARED_CONSTRUCTORS)); } if (!isVisible(constructor, constructor.getDeclaringClass()) || registeredBean.getMergedBeanDefinition().hasMethodOverrides()) { return generateCodeForInaccessibleConstructor(descriptor, hints -> hints.registerConstructor(constructor, ExecutableMode.INVOKE)); } return generateCodeForAccessibleConstructor(descriptor); }
Generate the instance supplier code for the given constructor, falling back to reflection-based code generation when the constructor is not accessible or the bean definition declares method overrides. @param registeredBean the bean to handle @param constructor the constructor to use to create the bean @return the generated code
java
spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java
161
[ "registeredBean", "constructor" ]
CodeBlock
true
6
7.44
spring-projects/spring-framework
59,386
javadoc
false
compile
public static FilterPath[] compile(Set<String> filters) { if (filters == null || filters.isEmpty()) { return null; } FilterPathBuilder builder = new FilterPathBuilder(); for (String filter : filters) { if (filter != null) { filter = filter.trim(); if (filter.length() > 0) { builder.insert(filter); } } } FilterPath filterPath = builder.build(); return Collections.singletonList(filterPath).toArray(new FilterPath[0]); }
Compiles the given filter expressions into an array of filter paths. Each non-blank filter is trimmed and inserted into the builder; {@code null} is returned if the input set is {@code null} or empty. @param filters the filter expressions to compile, may be {@code null} @return the compiled filter paths, or {@code null} if there was nothing to compile
java
libs/x-content/src/main/java/org/elasticsearch/xcontent/support/filtering/FilterPath.java
208
[ "filters" ]
true
5
7.76
elastic/elasticsearch
75,680
javadoc
false
apply
public final void apply() { apply(null); }
Apply the properties as system properties. Equivalent to calling {@code apply(null)} with no log file.
java
core/spring-boot/src/main/java/org/springframework/boot/logging/LoggingSystemProperties.java
107
[]
void
true
1
6.8
spring-projects/spring-boot
79,428
javadoc
false
_raise_if_missing
def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found. """ if len(key) == 0: return # Count missing values missing_mask = indexer < 0 nmissing = missing_mask.sum() if nmissing: if nmissing == len(indexer): raise KeyError(f"None of [{key}] are in the [{axis_name}]") not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique()) raise KeyError(f"{not_found} not in index")
Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found.
python
pandas/core/indexes/base.py
6,231
[ "self", "key", "indexer", "axis_name" ]
None
true
4
6.88
pandas-dev/pandas
47,362
numpy
false
toString
@Override public String toString() { if (toString == null) { toString = getNumerator() + "/" + getDenominator(); } return toString; }
Gets the fraction as a {@link String}. <p> The format used is '<em>numerator</em>/<em>denominator</em>' always. </p> @return a {@link String} form of the fraction
java
src/main/java/org/apache/commons/lang3/math/Fraction.java
916
[]
String
true
2
7.76
apache/commons-lang
2,896
javadoc
false
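A minimal sketch of the 'numerator/denominator' format; Fraction.getFraction(int, int) is the usual factory in the same class.

```java
import org.apache.commons.lang3.math.Fraction;

public class FractionToStringDemo {
    public static void main(String[] args) {
        Fraction half = Fraction.getFraction(1, 2);
        // Always rendered as numerator/denominator; the string is cached after the first call.
        System.out.println(half); // 1/2
        System.out.println(Fraction.getFraction(7, 3)); // 7/3
    }
}
```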
concat
public static ByteSource concat(Iterable<? extends ByteSource> sources) { return new ConcatenatedByteSource(sources); }
Concatenates multiple {@link ByteSource} instances into a single source. Streams returned from the source will contain the concatenated data from the streams of the underlying sources. <p>Only one underlying stream will be open at a time. Closing the concatenated stream will close the open underlying stream. @param sources the sources to concatenate @return a {@code ByteSource} containing the concatenated data @since 15.0
java
android/guava/src/com/google/common/io/ByteSource.java
374
[ "sources" ]
ByteSource
true
1
6.64
google/guava
51,352
javadoc
false
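A runnable concatenation sketch; ByteSource.wrap is assumed as the simplest way to build in-memory sources.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import com.google.common.io.ByteSource;

public class ConcatDemo {
    public static void main(String[] args) throws IOException {
        ByteSource first = ByteSource.wrap("Hello, ".getBytes(StandardCharsets.UTF_8));
        ByteSource second = ByteSource.wrap("world".getBytes(StandardCharsets.UTF_8));
        ByteSource joined = ByteSource.concat(Arrays.asList(first, second));
        // Only one underlying stream is open at a time while reading.
        System.out.println(new String(joined.read(), StandardCharsets.UTF_8)); // Hello, world
    }
}
```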
ledoit_wolf_shrinkage
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000): """Estimate the shrunk Ledoit-Wolf covariance matrix. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage. assume_centered : bool, default=False If True, data will not be centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data will be centered before computation. block_size : int, default=1000 Size of blocks into which the covariance matrix will be split. Returns ------- shrinkage : float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularized (shrunk) covariance is: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features Examples -------- >>> import numpy as np >>> from sklearn.covariance import ledoit_wolf_shrinkage >>> real_cov = np.array([[.4, .2], [.2, .8]]) >>> rng = np.random.RandomState(0) >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X) >>> shrinkage_coefficient np.float64(0.23) """ X = check_array(X) # for only one feature, the result is the same whatever the shrinkage if len(X.shape) == 2 and X.shape[1] == 1: return 0.0 if X.ndim == 1: X = np.reshape(X, (1, -1)) if X.shape[0] == 1: warnings.warn( "Only one sample available. You may want to reshape your data array" ) n_samples, n_features = X.shape # optionally center data if not assume_centered: X = X - X.mean(0) # A non-blocked version of the computation is present in the tests # in tests/test_covariance.py # number of blocks to split the covariance matrix into n_splits = int(n_features / block_size) X2 = X**2 emp_cov_trace = np.sum(X2, axis=0) / n_samples mu = np.sum(emp_cov_trace) / n_features beta_ = 0.0 # sum of the coefficients of <X2.T, X2> delta_ = 0.0 # sum of the *squared* coefficients of <X.T, X> # starting block computation for i in range(n_splits): for j in range(n_splits): rows = slice(block_size * i, block_size * (i + 1)) cols = slice(block_size * j, block_size * (j + 1)) beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols])) delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2) rows = slice(block_size * i, block_size * (i + 1)) beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits :])) delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits :]) ** 2) for j in range(n_splits): cols = slice(block_size * j, block_size * (j + 1)) beta_ += np.sum(np.dot(X2.T[block_size * n_splits :], X2[:, cols])) delta_ += np.sum(np.dot(X.T[block_size * n_splits :], X[:, cols]) ** 2) delta_ += np.sum( np.dot(X.T[block_size * n_splits :], X[:, block_size * n_splits :]) ** 2 ) delta_ /= n_samples**2 beta_ += np.sum( np.dot(X2.T[block_size * n_splits :], X2[:, block_size * n_splits :]) ) # use delta_ to compute beta beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_) # delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2 delta /= n_features # get final beta as the min between beta and delta # We do this to prevent shrinking more than "1", which would invert # the value of covariances beta = min(beta, delta) # finally get shrinkage shrinkage = 0 if beta == 0 else beta / delta return shrinkage
Estimate the shrunk Ledoit-Wolf covariance matrix. Read more in the :ref:`User Guide <shrunk_covariance>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage. assume_centered : bool, default=False If True, data will not be centered before computation. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, data will be centered before computation. block_size : int, default=1000 Size of blocks into which the covariance matrix will be split. Returns ------- shrinkage : float Coefficient in the convex combination used for the computation of the shrunk estimate. Notes ----- The regularized (shrunk) covariance is: (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) where mu = trace(cov) / n_features Examples -------- >>> import numpy as np >>> from sklearn.covariance import ledoit_wolf_shrinkage >>> real_cov = np.array([[.4, .2], [.2, .8]]) >>> rng = np.random.RandomState(0) >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X) >>> shrinkage_coefficient np.float64(0.23)
python
sklearn/covariance/_shrunk_covariance.py
297
[ "X", "assume_centered", "block_size" ]
false
10
7.12
scikit-learn/scikit-learn
64,340
numpy
false
errorCounts
@Override public Map<Errors, Integer> errorCounts() { return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); }
The number of each type of error in the response, including {@link Errors#NONE} and top-level errors as well as more specifically scoped errors (such as topic or partition-level errors). @return A count of errors.
java
clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java
48
[]
true
1
6.8
apache/kafka
31,560
javadoc
false
skipFully
public static void skipFully(Reader reader, long n) throws IOException { checkNotNull(reader); while (n > 0) { long amt = reader.skip(n); if (amt == 0) { throw new EOFException(); } n -= amt; } }
Discards {@code n} characters of data from the reader. This method will block until the full amount has been skipped. Does not close the reader. @param reader the reader to read from @param n the number of characters to skip @throws EOFException if this stream reaches the end before skipping all the characters @throws IOException if an I/O error occurs
java
android/guava/src/com/google/common/io/CharStreams.java
271
[ "reader", "n" ]
void
true
3
7.04
google/guava
51,352
javadoc
false
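A small sketch of the blocking skip and its end-of-stream behavior:

```java
import java.io.IOException;
import java.io.StringReader;
import com.google.common.io.CharStreams;

public class SkipFullyDemo {
    public static void main(String[] args) throws IOException {
        StringReader reader = new StringReader("0123456789");
        CharStreams.skipFully(reader, 4); // discards "0123"
        System.out.println((char) reader.read()); // 4
        // Asking for more characters than remain throws EOFException:
        // CharStreams.skipFully(reader, 100);
    }
}
```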
copyTo
void copyTo(DataBlock dataBlock, long pos, ZipEntry zipEntry) throws IOException { int fileNameLength = Short.toUnsignedInt(fileNameLength()); int extraLength = Short.toUnsignedInt(extraFieldLength()); int commentLength = Short.toUnsignedInt(fileCommentLength()); zipEntry.setMethod(Short.toUnsignedInt(compressionMethod())); zipEntry.setTime(decodeMsDosFormatDateTime(lastModFileDate(), lastModFileTime())); zipEntry.setCrc(Integer.toUnsignedLong(crc32())); zipEntry.setCompressedSize(Integer.toUnsignedLong(compressedSize())); zipEntry.setSize(Integer.toUnsignedLong(uncompressedSize())); if (extraLength > 0) { long extraPos = pos + MINIMUM_SIZE + fileNameLength; ByteBuffer buffer = ByteBuffer.allocate(extraLength); dataBlock.readFully(buffer, extraPos); zipEntry.setExtra(buffer.array()); } if (commentLength > 0) { long commentPos = pos + MINIMUM_SIZE + fileNameLength + extraLength; zipEntry.setComment(ZipString.readString(dataBlock, commentPos, commentLength)); } }
Copy values from this block to the given {@link ZipEntry}. @param dataBlock the source data block @param pos the position of this {@link ZipCentralDirectoryFileHeaderRecord} @param zipEntry the destination zip entry @throws IOException on I/O error
java
loader/spring-boot-loader/src/main/java/org/springframework/boot/loader/zip/ZipCentralDirectoryFileHeaderRecord.java
85
[ "dataBlock", "pos", "zipEntry" ]
void
true
3
6.24
spring-projects/spring-boot
79,428
javadoc
false
compile
def compile(pattern: str, base_dir: Path, definition_file: Path) -> _IgnoreRule | None: """Build an ignore rule from the supplied glob pattern and log a useful warning if it is invalid.""" relative_to: Path | None = None if pattern.strip() == "/": # "/" doesn't match anything in gitignore log.warning("Ignoring no-op glob pattern '/' from %s", definition_file) return None if pattern.startswith("/") or "/" in pattern.rstrip("/"): # See https://git-scm.com/docs/gitignore # > If there is a separator at the beginning or middle (or both) of the pattern, then the # > pattern is relative to the directory level of the particular .gitignore file itself. # > Otherwise the pattern may also match at any level below the .gitignore level. relative_to = definition_file.parent ignore_pattern = GitWildMatchPattern(pattern) return _GlobIgnoreRule(wild_match_pattern=ignore_pattern, relative_to=relative_to)
Build an ignore rule from the supplied glob pattern and log a useful warning if it is invalid.
python
airflow-core/src/airflow/utils/file.py
90
[ "pattern", "base_dir", "definition_file" ]
_IgnoreRule | None
true
4
6
apache/airflow
43,597
unknown
false
getMatchOutcome
@Override public ConditionOutcome getMatchOutcome(ConditionContext context, AnnotatedTypeMetadata metadata) { if (context.getEnvironment().containsProperty(this.property)) { return ConditionOutcome.match(startConditionMessage().foundExactly("property " + this.property)); } return getResourceOutcome(context, metadata); }
Matches when the configuration property is present in the environment; otherwise delegates to the resource outcome to check the default resource locations. @param context the condition context @param metadata the annotation metadata @return the condition outcome
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/condition/ResourceCondition.java
60
[ "context", "metadata" ]
ConditionOutcome
true
2
6.56
spring-projects/spring-boot
79,428
javadoc
false
duration
public long duration() { return duration; }
@return the number of {@link #timeUnit()} units this value contains
java
libs/core/src/main/java/org/elasticsearch/core/TimeValue.java
110
[]
true
1
6
elastic/elasticsearch
75,680
javadoc
false
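A minimal sketch, assuming the usual TimeValue factory methods from the same class:

```java
import org.elasticsearch.core.TimeValue;

public class TimeValueDemo {
    public static void main(String[] args) {
        TimeValue timeout = TimeValue.timeValueSeconds(30);
        // duration() is the raw count; timeUnit() gives the unit it is counted in.
        System.out.println(timeout.duration()); // 30
        System.out.println(timeout.timeUnit()); // SECONDS
    }
}
```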
awaitPendingRequests
public boolean awaitPendingRequests(Node node, Timer timer) { while (hasPendingRequests(node) && timer.notExpired()) { poll(timer); } return !hasPendingRequests(node); }
Block until all pending requests from the given node have finished. @param node The node to await requests from @param timer Timer bounding how long this method can block @return true If all requests finished, false if the timeout expired first
java
clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClient.java
353
[ "node", "timer" ]
true
3
8.24
apache/kafka
31,560
javadoc
false
containsEntryImpl
static <K extends @Nullable Object, V extends @Nullable Object> boolean containsEntryImpl( Collection<Entry<K, V>> c, @Nullable Object o) { if (!(o instanceof Entry)) { return false; } return c.contains(unmodifiableEntry((Entry<?, ?>) o)); }
Implements {@code Collection.contains} safely for forwarding collections of map entries. If {@code o} is an instance of {@code Entry}, it is wrapped using {@link #unmodifiableEntry} to protect against a possible nefarious equals method. <p>Note that {@code c} is the backing (delegate) collection, rather than the forwarding collection. @param c the delegate (unwrapped) collection of map entries @param o the object that might be contained in {@code c} @return {@code true} if {@code c} contains {@code o}
java
android/guava/src/com/google/common/collect/Maps.java
3,644
[ "c", "o" ]
true
2
7.92
google/guava
51,352
javadoc
false
determineBrokerUrl
String determineBrokerUrl() { if (this.brokerUrl != null) { return this.brokerUrl; } if (this.embedded.isEnabled()) { return DEFAULT_EMBEDDED_BROKER_URL; } return DEFAULT_NETWORK_BROKER_URL; }
Determine the broker URL to use: the configured URL if one is set, the default embedded broker URL when the embedded broker is enabled, and the default network broker URL otherwise. @return the broker URL to use
java
module/spring-boot-activemq/src/main/java/org/springframework/boot/activemq/autoconfigure/ActiveMQProperties.java
144
[]
String
true
3
6.72
spring-projects/spring-boot
79,428
javadoc
false
canShortcutWithSource
boolean canShortcutWithSource(ElementType requiredType) { return canShortcutWithSource(requiredType, requiredType); }
Returns if the element source can be used as a shortcut for an operation such as {@code equals} or {@code toString}. @param requiredType the required type @return {@code true} if all elements match at least one of the types
java
core/spring-boot/src/main/java/org/springframework/boot/context/properties/source/ConfigurationPropertyName.java
951
[ "requiredType" ]
true
1
6.64
spring-projects/spring-boot
79,428
javadoc
false
run
def run( command: str, command_args: tuple, backend: str, builder: str, docker_host: str | None, force_build: bool, forward_credentials: bool, github_repository: str, mysql_version: str, platform: str | None, postgres_version: str, project_name: str, python: str, skip_image_upgrade_check: bool, tty: str, use_uv: bool, uv_http_timeout: int, ): """ Run a command in the Breeze environment without entering the interactive shell. This is useful for automated testing, CI workflows, and one-off command execution. The command will be executed in a fresh container that is automatically cleaned up. Each run uses a unique project name to avoid conflicts with other instances. Examples: # Run a specific test breeze run pytest providers/google/tests/unit/google/cloud/operators/test_dataflow.py -v # Check version compatibility breeze run python -c "from airflow.providers.google.version_compat import AIRFLOW_V_3_0_PLUS; print(AIRFLOW_V_3_0_PLUS)" # Run bash commands breeze run bash -c "cd /opt/airflow && python -m pytest providers/google/tests/" # Run with different Python version breeze run --python 3.11 pytest providers/standard/tests/unit/operators/test_bash.py # Run with PostgreSQL backend breeze run --backend postgres pytest providers/postgres/tests/ """ import uuid from airflow_breeze.commands.ci_image_commands import rebuild_or_pull_ci_image_if_needed from airflow_breeze.params.shell_params import ShellParams from airflow_breeze.utils.ci_group import ci_group from airflow_breeze.utils.docker_command_utils import execute_command_in_shell from airflow_breeze.utils.platforms import get_normalized_platform # Generate a unique project name to avoid conflicts with other running instances unique_project_name = f"{project_name}-run-{uuid.uuid4().hex[:8]}" # Build the full command string with proper escaping import shlex if command_args: # Use shlex.join to properly escape arguments full_command = f"{command} {shlex.join(command_args)}" else: full_command = command platform = get_normalized_platform(platform) # Create shell parameters optimized for non-interactive command execution shell_params = ShellParams( backend=backend, builder=builder, docker_host=docker_host, force_build=force_build, forward_credentials=forward_credentials, github_repository=github_repository, mysql_version=mysql_version, platform=platform, postgres_version=postgres_version, project_name=unique_project_name, python=python, skip_image_upgrade_check=skip_image_upgrade_check, use_uv=use_uv, uv_http_timeout=uv_http_timeout, # Optimizations for non-interactive execution quiet=True, skip_environment_initialization=True, tty=tty, # Set extra_args to empty tuple since we'll pass the command directly extra_args=(), ) if get_verbose(): get_console().print(f"[info]Running command in Breeze: {full_command}[/]") get_console().print(f"[info]Using project name: {unique_project_name}[/]") # Build or pull the CI image if needed rebuild_or_pull_ci_image_if_needed(command_params=shell_params) # Execute the command in the shell with ci_group(f"Running command: {command}"): result = execute_command_in_shell( shell_params=shell_params, project_name=unique_project_name, command=full_command, # Always preserve the backend specified by user (or resolved from default) preserve_backend=True, ) # Clean up ownership from airflow_breeze.utils.docker_command_utils import fix_ownership_using_docker fix_ownership_using_docker() # Exit with the same code as the command sys.exit(result.returncode)
Run a command in the Breeze environment without entering the interactive shell. This is useful for automated testing, CI workflows, and one-off command execution. The command will be executed in a fresh container that is automatically cleaned up. Each run uses a unique project name to avoid conflicts with other instances. Examples: # Run a specific test breeze run pytest providers/google/tests/unit/google/cloud/operators/test_dataflow.py -v # Check version compatibility breeze run python -c "from airflow.providers.google.version_compat import AIRFLOW_V_3_0_PLUS; print(AIRFLOW_V_3_0_PLUS)" # Run bash commands breeze run bash -c "cd /opt/airflow && python -m pytest providers/google/tests/" # Run with different Python version breeze run --python 3.11 pytest providers/standard/tests/unit/operators/test_bash.py # Run with PostgreSQL backend breeze run --backend postgres pytest providers/postgres/tests/
python
dev/breeze/src/airflow_breeze/commands/developer_commands.py
1,072
[ "command", "command_args", "backend", "builder", "docker_host", "force_build", "forward_credentials", "github_repository", "mysql_version", "platform", "postgres_version", "project_name", "python", "skip_image_upgrade_check", "tty", "use_uv", "uv_http_timeout" ]
true
4
6.72
apache/airflow
43,597
unknown
false
uncaughtExceptionHandler
public Builder uncaughtExceptionHandler( final Thread.UncaughtExceptionHandler exceptionHandler) { this.exceptionHandler = Objects.requireNonNull(exceptionHandler, "handler"); return this; }
Sets the uncaught exception handler for the threads created by the new {@link BasicThreadFactory}. @param exceptionHandler the {@link UncaughtExceptionHandler} (must not be <strong>null</strong>) @return a reference to this {@link Builder} @throws NullPointerException if the exception handler is <strong>null</strong>
java
src/main/java/org/apache/commons/lang3/concurrent/BasicThreadFactory.java
215
[ "exceptionHandler" ]
Builder
true
1
6.08
apache/commons-lang
2,896
javadoc
false
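A sketch of wiring the handler through the builder; namingPattern is another option on the same builder, used here purely for illustration.

```java
import org.apache.commons.lang3.concurrent.BasicThreadFactory;

public class ThreadFactoryDemo {
    public static void main(String[] args) {
        BasicThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern("worker-%d")
                .uncaughtExceptionHandler((thread, ex) ->
                        System.err.println(thread.getName() + " failed: " + ex))
                .build();
        // The handler above receives the IllegalStateException thrown here.
        factory.newThread(() -> { throw new IllegalStateException("boom"); }).start();
    }
}
```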
render_log_filename
def render_log_filename(ti: TaskInstance, try_number, filename_template) -> str: """ Given task instance, try_number, filename_template, return the rendered log filename. :param ti: task instance :param try_number: try_number of the task :param filename_template: filename template, which can be jinja template or python string template """ filename_template, filename_jinja_template = parse_template_string(filename_template) if filename_jinja_template: jinja_context = ti.get_template_context() jinja_context["try_number"] = try_number return _render_template_to_string(filename_jinja_template, jinja_context) return filename_template.format( dag_id=ti.dag_id, task_id=ti.task_id, logical_date=ti.logical_date.isoformat(), try_number=try_number, )
Given task instance, try_number, filename_template, return the rendered log filename. :param ti: task instance :param try_number: try_number of the task :param filename_template: filename template, which can be jinja template or python string template
python
airflow-core/src/airflow/utils/helpers.py
173
[ "ti", "try_number", "filename_template" ]
str
true
2
6.24
apache/airflow
43,597
sphinx
false
watch
void watch(Set<Path> paths, Runnable action) { Assert.notNull(paths, "'paths' must not be null"); Assert.notNull(action, "'action' must not be null"); if (paths.isEmpty()) { return; } synchronized (this.lock) { try { if (this.thread == null) { this.thread = new WatcherThread(); this.thread.start(); } this.thread.register(new Registration(getRegistrationPaths(paths), action)); } catch (IOException ex) { throw new UncheckedIOException("Failed to register paths for watching: " + paths, ex); } } }
Watch the given files or directories for changes. @param paths the files or directories to watch @param action the action to take when changes are detected
java
core/spring-boot-autoconfigure/src/main/java/org/springframework/boot/autoconfigure/ssl/FileWatcher.java
78
[ "paths", "action" ]
void
true
4
7.04
spring-projects/spring-boot
79,428
javadoc
false
parseTypeParameters
function parseTypeParameters(): NodeArray<TypeParameterDeclaration> | undefined { if (token() === SyntaxKind.LessThanToken) { return parseBracketedList(ParsingContext.TypeParameters, parseTypeParameter, SyntaxKind.LessThanToken, SyntaxKind.GreaterThanToken); } }
Parses a bracketed list of type parameters when the current token is `<`; returns `undefined` when no type parameter list is present.
typescript
src/compiler/parser.ts
3,987
[]
true
2
6.72
microsoft/TypeScript
107,154
jsdoc
false
generateSetBeanDefinitionPropertiesCode
CodeBlock generateSetBeanDefinitionPropertiesCode( GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode, RootBeanDefinition beanDefinition, Predicate<String> attributeFilter);
Generate the code that sets the properties of the bean definition. @param generationContext the generation context @param beanRegistrationCode the bean registration code @param attributeFilter any attribute filtering that should be applied @return the generated code
java
spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanRegistrationCodeFragments.java
91
[ "generationContext", "beanRegistrationCode", "beanDefinition", "attributeFilter" ]
CodeBlock
true
1
6
spring-projects/spring-framework
59,386
javadoc
false
getStringOrNull
protected @Nullable String getStringOrNull(ResourceBundle bundle, String key) { if (bundle.containsKey(key)) { try { return bundle.getString(key); } catch (MissingResourceException ex) { // Assume key not found for some other reason // -> do NOT throw the exception to allow for checking parent message source. } } return null; }
Efficiently retrieve the String value for the specified key, or return {@code null} if not found. <p>As of 4.2, the default implementation checks {@code containsKey} before it attempts to call {@code getString} (which would require catching {@code MissingResourceException} for key not found). <p>Can be overridden in subclasses. @param bundle the ResourceBundle to perform the lookup in @param key the key to look up @return the associated value, or {@code null} if none @since 4.2 @see ResourceBundle#getString(String) @see ResourceBundle#containsKey(String)
java
spring-context/src/main/java/org/springframework/context/support/ResourceBundleMessageSource.java
353
[ "bundle", "key" ]
String
true
3
7.76
spring-projects/spring-framework
59,386
javadoc
false
appendArgumentTypes
private static void appendArgumentTypes(MethodInvocation methodInvocation, Matcher matcher, StringBuilder output) { Class<?>[] argumentTypes = methodInvocation.getMethod().getParameterTypes(); String[] argumentTypeShortNames = new String[argumentTypes.length]; for (int i = 0; i < argumentTypeShortNames.length; i++) { argumentTypeShortNames[i] = ClassUtils.getShortName(argumentTypes[i]); } matcher.appendReplacement(output, Matcher.quoteReplacement(StringUtils.arrayToCommaDelimitedString(argumentTypeShortNames))); }
Adds a comma-separated list of the short {@code Class} names of the method argument types to the output. For example, if a method has signature {@code put(java.lang.String, java.lang.Object)} then the value returned will be {@code String, Object}. @param methodInvocation the {@code MethodInvocation} being logged. Arguments will be retrieved from the corresponding {@code Method}. @param matcher the {@code Matcher} containing the state of the output @param output the {@code StringBuilder} containing the output
java
spring-aop/src/main/java/org/springframework/aop/interceptor/CustomizableTraceInterceptor.java
377
[ "methodInvocation", "matcher", "output" ]
void
true
2
6.4
spring-projects/spring-framework
59,386
javadoc
false
parseFunctionOrConstructorTypeToError
function parseFunctionOrConstructorTypeToError( isInUnionType: boolean, ): TypeNode | undefined { // the function type and constructor type shorthand notation // are not allowed directly in unions and intersections, but we'll // try to parse them gracefully and issue a helpful message. if (isStartOfFunctionTypeOrConstructorType()) { const type = parseFunctionOrConstructorType(); let diagnostic: DiagnosticMessage; if (isFunctionTypeNode(type)) { diagnostic = isInUnionType ? Diagnostics.Function_type_notation_must_be_parenthesized_when_used_in_a_union_type : Diagnostics.Function_type_notation_must_be_parenthesized_when_used_in_an_intersection_type; } else { diagnostic = isInUnionType ? Diagnostics.Constructor_type_notation_must_be_parenthesized_when_used_in_a_union_type : Diagnostics.Constructor_type_notation_must_be_parenthesized_when_used_in_an_intersection_type; } parseErrorAtRange(type, diagnostic); return type; } return undefined; }
Reports a diagnostic error for the current token being an invalid name. @param blankDiagnostic Diagnostic to report for the case of the name being blank (matched tokenIfBlankName). @param nameDiagnostic Diagnostic to report for all other cases. @param tokenIfBlankName Current token if the name was invalid for being blank (not provided / skipped).
typescript
src/compiler/parser.ts
4,794
[ "isInUnionType" ]
true
6
6.88
microsoft/TypeScript
107,154
jsdoc
false
keySet
@Override
public Set<K> keySet() {
    return (keySetView == null) ? keySetView = createKeySet() : keySetView;
}
Updates the index an iterator is pointing to after a call to remove: returns the index of the entry that should be looked at after a removal on indexRemoved, with indexBeforeRemove as the index that *was* the next entry that would be looked at.
java
android/guava/src/com/google/common/collect/CompactHashMap.java
670
[]
true
2
6.32
google/guava
51,352
javadoc
false
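The keySet() body above is the classic lazily created, cached view: build the view object on first access, then keep returning the same instance. A standalone sketch of that idiom follows (hypothetical class and field names, not Guava's); note the idiom assumes single-threaded access, since two concurrent first calls could each build a view.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

// Lazily create a view the first time it is requested, then reuse it.
class CachedViewSketch {
    private final Map<String, Integer> backing = new LinkedHashMap<>();
    private Set<String> keySetView; // null until the first keySet() call

    Set<String> keySet() {
        return (keySetView == null) ? keySetView = createKeySet() : keySetView;
    }

    private Set<String> createKeySet() {
        // A live, read-only view over the backing map's keys.
        return Collections.unmodifiableSet(backing.keySet());
    }
}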
_nanaverage
def _nanaverage(a, weights=None):
    """Compute the weighted average, ignoring NaNs.

    Parameters
    ----------
    a : ndarray
        Array containing data to be averaged.
    weights : array-like, default=None
        An array of weights associated with the values in a. Each value in a
        contributes to the average according to its associated weight. The
        weights array can either be 1-D of the same shape as a. If
        `weights=None`, then all data in a are assumed to have a weight equal
        to one.

    Returns
    -------
    weighted_average : float
        The weighted average.

    Notes
    -----
    This wrapper to combine :func:`numpy.average` and :func:`numpy.nanmean`, so
    that :func:`np.nan` values are ignored from the average and weights can be
    passed. Note that when possible, we delegate to the prime methods.
    """
    xp, _ = get_namespace(a)
    if a.shape[0] == 0:
        return xp.nan

    mask = xp.isnan(a)
    if xp.all(mask):
        return xp.nan

    if weights is None:
        return _nanmean(a, xp=xp)

    weights = xp.asarray(weights)
    a, weights = a[~mask], weights[~mask]
    try:
        return _average(a, weights=weights)
    except ZeroDivisionError:
        # this is when all weights are zero, then ignore them
        return _average(a)
Compute the weighted average, ignoring NaNs.

Parameters
----------
a : ndarray
    Array containing data to be averaged.
weights : array-like, default=None
    An array of weights associated with the values in a. Each value in a
    contributes to the average according to its associated weight. The
    weights array can either be 1-D of the same shape as a. If
    `weights=None`, then all data in a are assumed to have a weight equal
    to one.

Returns
-------
weighted_average : float
    The weighted average.

Notes
-----
This wrapper to combine :func:`numpy.average` and :func:`numpy.nanmean`, so
that :func:`np.nan` values are ignored from the average and weights can be
passed. Note that when possible, we delegate to the prime methods.
python
sklearn/utils/extmath.py
1,333
[ "a", "weights" ]
false
4
6.24
scikit-learn/scikit-learn
64,340
numpy
false
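The record above masks out NaN entries (together with their weights), falls back to an unweighted mean when no weights are given, and ignores the weights entirely when they sum to zero. A Java rendition of that logic, written as a sketch rather than a port of the scikit-learn helper, and assuming both arrays have the same length:

// NaN-aware weighted average: drop NaN values and their weights,
// fall back to the plain mean when all kept weights are zero.
class NanAverageSketch {
    static double nanAverage(double[] values, double[] weights) {
        double weightedSum = 0.0, weightTotal = 0.0, plainSum = 0.0;
        int kept = 0;
        for (int i = 0; i < values.length; i++) {
            if (Double.isNaN(values[i])) {
                continue; // mirrors the mask = isnan(a) filtering step
            }
            weightedSum += values[i] * weights[i];
            weightTotal += weights[i];
            plainSum += values[i];
            kept++;
        }
        if (kept == 0) {
            return Double.NaN; // every input was NaN
        }
        if (weightTotal == 0.0) {
            return plainSum / kept; // the ZeroDivisionError branch: ignore weights
        }
        return weightedSum / weightTotal;
    }

    public static void main(String[] args) {
        double[] a = {1.0, Double.NaN, 3.0};
        double[] w = {1.0, 5.0, 3.0};
        System.out.println(nanAverage(a, w)); // (1*1 + 3*3) / (1 + 3) = 2.5
    }
}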
describeReplicaLogDirs
default DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas) {
    return describeReplicaLogDirs(replicas, new DescribeReplicaLogDirsOptions());
}
Query the replica log directory information for the specified replicas. <p> This is a convenience method for {@link #describeReplicaLogDirs(Collection, DescribeReplicaLogDirsOptions)} with default options. See the overload for more details. <p> This operation is supported by brokers with version 1.0.0 or higher. @param replicas The replicas to query @return The DescribeReplicaLogDirsResult
java
clients/src/main/java/org/apache/kafka/clients/admin/Admin.java
607
[ "replicas" ]
DescribeReplicaLogDirsResult
true
1
6.32
apache/kafka
31,560
javadoc
false
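A usage sketch for the convenience overload above, based on the public Admin API as I understand it; the broker address, topic name, and broker id are made up, and production code would add timeouts and error handling.

import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult;
import org.apache.kafka.common.TopicPartitionReplica;

// Query where a specific replica's log lives on disk.
class ReplicaLogDirsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // Ask where partition 0 of "orders" lives on broker 1.
            TopicPartitionReplica replica = new TopicPartitionReplica("orders", 0, 1);
            DescribeReplicaLogDirsResult result =
                    admin.describeReplicaLogDirs(List.of(replica));
            Map<TopicPartitionReplica, DescribeReplicaLogDirsResult.ReplicaLogDirInfo> dirs =
                    result.all().get(); // block until the broker responds
            dirs.forEach((r, info) ->
                    System.out.println(r + " -> " + info.getCurrentReplicaLogDir()));
        }
    }
}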
holidays
def holidays(
    self, start=None, end=None, return_name: bool = False
) -> DatetimeIndex | Series:
    """
    Returns a curve with holidays between start_date and end_date

    Parameters
    ----------
    start : starting date, datetime-like, optional
    end : ending date, datetime-like, optional
    return_name : bool, optional
        If True, return a series that has dates and holiday names.
        False will only return a DatetimeIndex of dates.

    Returns
    -------
    DatetimeIndex of holidays
    """
    if self.rules is None:
        raise Exception(
            f"Holiday Calendar {self.name} does not have any rules specified"
        )

    if start is None:
        start = AbstractHolidayCalendar.start_date

    if end is None:
        end = AbstractHolidayCalendar.end_date

    start = Timestamp(start)
    end = Timestamp(end)

    # If we don't have a cache or the dates are outside the prior cache, we
    # get them again
    if self._cache is None or start < self._cache[0] or end > self._cache[1]:
        pre_holidays = [
            rule.dates(start, end, return_name=True) for rule in self.rules
        ]
        if pre_holidays:
            holidays = concat(pre_holidays)
        else:
            holidays = Series(index=DatetimeIndex([]), dtype=object)

        self._cache = (start, end, holidays.sort_index())

    holidays = self._cache[2]
    holidays = holidays[start:end]

    if return_name:
        return holidays
    else:
        return holidays.index
Returns a curve with holidays between start_date and end_date

Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
    If True, return a series that has dates and holiday names.
    False will only return a DatetimeIndex of dates.

Returns
-------
DatetimeIndex of holidays
python
pandas/tseries/holiday.py
496
[ "self", "start", "end", "return_name" ]
DatetimeIndex | Series
true
11
6.72
pandas-dev/pandas
47,362
numpy
false
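Stripped of pandas specifics, the caching logic in holidays above is: keep one computed window plus its bounds, rebuild it whenever a query falls outside, then answer by slicing the cached window. A language-neutral sketch of that shape, rendered here in Java with hypothetical types (the compute function stands in for evaluating the calendar rules):

import java.time.LocalDate;
import java.util.List;
import java.util.TreeMap;
import java.util.function.BiFunction;

// Cache one computed window; rebuild on a miss, then slice for the answer.
class WindowCacheSketch {
    private LocalDate cachedStart, cachedEnd;
    private TreeMap<LocalDate, String> cached; // date -> holiday name
    private final BiFunction<LocalDate, LocalDate, TreeMap<LocalDate, String>> compute;

    WindowCacheSketch(BiFunction<LocalDate, LocalDate, TreeMap<LocalDate, String>> compute) {
        this.compute = compute;
    }

    List<LocalDate> holidays(LocalDate start, LocalDate end) {
        if (cached == null || start.isBefore(cachedStart) || end.isAfter(cachedEnd)) {
            cached = compute.apply(start, end); // rebuild for the requested window
            cachedStart = start;
            cachedEnd = end;
        }
        // subMap(start, true, end, true) is the [start:end] slice of the cache.
        return List.copyOf(cached.subMap(start, true, end, true).keySet());
    }
}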
readFloat
@CanIgnoreReturnValue // to skip some bytes
@Override
public float readFloat() throws IOException {
    return Float.intBitsToFloat(readInt());
}
Reads a {@code float} as specified by {@link DataInputStream#readFloat()}, except using little-endian byte order. @return the next four bytes of the input stream, interpreted as a {@code float} in little-endian byte order @throws IOException if an I/O error occurs
java
android/guava/src/com/google/common/io/LittleEndianDataInputStream.java
156
[]
true
1
6.56
google/guava
51,352
javadoc
false
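The record above reads four bytes as a little-endian int and reinterprets the bits as a float. The same byte swapping can be cross-checked with ByteBuffer, which performs an equivalent conversion; this is a verification sketch, not Guava code:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Little-endian float decoding: assemble the int, then reinterpret the bits.
class LittleEndianFloatSketch {
    public static void main(String[] args) {
        // 1.0f is 0x3F800000, so the little-endian wire bytes are 00 00 80 3F.
        byte[] wire = {0x00, 0x00, (byte) 0x80, 0x3F};

        ByteBuffer buf = ByteBuffer.wrap(wire).order(ByteOrder.LITTLE_ENDIAN);
        int bits = buf.getInt(0);                       // 0x3F800000 after the byte swap
        System.out.println(Float.intBitsToFloat(bits)); // 1.0
        System.out.println(buf.getFloat(0));            // 1.0, same result in one call
    }
}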