sequence
stringlengths
492
15.9k
code
stringlengths
75
8.58k
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:gather_categories; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:imap; 5, identifier:header; 6, default_parameter; 6, 7; 6, 8; 7, identifier:categories; 8, None; 9, block; 9, 10; 9, 31; 9, 52; 9, 58; 9, 65; 9, 124; 9, 147; 9, 208; 9, 214; 9, 2...
def gather_categories(imap, header, categories=None): if categories is None: return {"default": DataCategory(set(imap.keys()), {})} cat_ids = [header.index(cat) for cat in categories if cat in header and "=" not in cat] table = OrderedDict() conditions = defaultdict(set) for i...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 10; 2, function_name:color_mapping; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:sample_map; 5, identifier:header; 6, identifier:group_column; 7, default_parameter; 7, 8; 7, 9; 8, identifier:color_column; 9, None; 10, block; 10, 11; 10, 17; 10, 27; 10, 99;...
def color_mapping(sample_map, header, group_column, color_column=None): group_colors = OrderedDict() group_gather = gather_categories(sample_map, header, [group_column]) if color_column is not None: color_gather = gather_categories(sample_map, header, [color_column]) for group in group_gathe...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 29; 2, function_name:shuffle_genome; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 12; 3, 15; 3, 16; 3, 19; 3, 22; 3, 23; 3, 26; 4, identifier:genome; 5, identifier:cat; 6, default_parameter; 6, 7; 6, 8; 7, identifier:fraction; 8, call; 8, 9; 8, 10; 9, identifier:float; 10,...
def shuffle_genome(genome, cat, fraction = float(100), plot = True, \ alpha = 0.1, beta = 100000, \ min_length = 1000, max_length = 200000): header = '>randomized_%s' % (genome.name) sequence = list(''.join([i[1] for i in parse_fasta(genome)])) length = len(sequence) shuffled = [] wh...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 11; 2, function_name:sam2fastq; 3, parameters; 3, 4; 3, 5; 3, 8; 4, identifier:sam; 5, default_parameter; 5, 6; 5, 7; 6, identifier:singles; 7, False; 8, default_parameter; 8, 9; 8, 10; 9, identifier:force; 10, False; 11, block; 11, 12; 11, 20; 12, expression_stat...
def sam2fastq(sam, singles = False, force = False): L, R = None, None for line in sam: if line.startswith('@') is True: continue line = line.strip().split() bit = [True if i == '1' else False \ for i in bin(int(line[1])).split('b')[1][::-1]] while len(...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:sort_sam; 3, parameters; 3, 4; 3, 5; 4, identifier:sam; 5, identifier:sort; 6, block; 6, 7; 6, 29; 6, 141; 7, expression_statement; 7, 8; 8, assignment; 8, 9; 8, 10; 9, identifier:tempdir; 10, binary_operator:%; 10, 11; 10, 12; 11, string:'%s/'...
def sort_sam(sam, sort): tempdir = '%s/' % (os.path.abspath(sam).rsplit('/', 1)[0]) if sort is True: mapping = '%s.sorted.sam' % (sam.rsplit('.', 1)[0]) if sam != '-': if os.path.exists(mapping) is False: os.system("\ sort -k1 --buffer-size=%sG -T ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 12; 2, function_name:crossmap; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, identifier:fas; 5, identifier:reads; 6, identifier:options; 7, identifier:no_shrink; 8, identifier:keepDB; 9, identifier:threads; 10, identifier:cluster; 11, identif...
def crossmap(fas, reads, options, no_shrink, keepDB, threads, cluster, nodes): if cluster is True: threads = '48' btc = [] for fa in fas: btd = bowtiedb(fa, keepDB) F, R, U = reads if F is not False: if U is False: u = False for i, f in...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:bit_by_bit; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, identifier:in_data; 6, block; 6, 7; 6, 25; 6, 31; 6, 97; 6, 135; 6, 152; 7, if_statement; 7, 8; 7, 13; 8, call; 8, 9; 8, 10; 9, identifier:isinstance; 10, argument_list; 10, 11; 10, ...
def bit_by_bit(self, in_data): if isinstance(in_data, str): in_data = [ord(c) for c in in_data] register = self.NonDirectInit for octet in in_data: if self.ReflectIn: octet = self.reflect(octet, 8) for i in range(8): topbit = re...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:parse_ggKbase_tables; 3, parameters; 3, 4; 3, 5; 4, identifier:tables; 5, identifier:id_type; 6, block; 6, 7; 6, 11; 6, 232; 7, expression_statement; 7, 8; 8, assignment; 8, 9; 8, 10; 9, identifier:g2info; 10, dictionary; 11, for_statement; 11,...
def parse_ggKbase_tables(tables, id_type): g2info = {} for table in tables: for line in open(table): line = line.strip().split('\t') if line[0].startswith('name'): header = line header[4] = 'genome size (bp)' header[12] = ' ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:top_hits; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:hits; 5, identifier:num; 6, identifier:column; 7, identifier:reverse; 8, block; 8, 9; 8, 24; 9, expression_statement; 9, 10; 10, call; 10, 11; 10, 14; 11, attribute; 11, 12; 11, 13;...
def top_hits(hits, num, column, reverse): hits.sort(key = itemgetter(column), reverse = reverse) for hit in hits[0:num]: yield hit
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:numBlast_sort; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:blast; 5, identifier:numHits; 6, identifier:evalueT; 7, identifier:bitT; 8, block; 8, 9; 8, 32; 8, 35; 8, 45; 8, 136; 8, 145; 9, expression_statement; 9, 10; 10, assignment; 10...
def numBlast_sort(blast, numHits, evalueT, bitT): header = [' 'qstart', 'qend', 'tstart', 'tend', 'evalue', 'bitscore'] yield header hmm = {h:[] for h in header} for line in blast: if line.startswith(' continue line = line.strip().split('\t') line[10], l...
0, module; 0, 1; 0, 64; 0, 67; 0, 75; 0, 267; 1, function_definition; 1, 2; 1, 3; 1, 9; 1, 47; 2, function_name:numDomtblout; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, identifier:domtblout; 5, identifier:numHits; 6, identifier:evalueT; 7, identifier:bitT; 8, identifier:sort; 9, ERROR; 9, 10; 9, 29; 9, 30; 9, 40; ...
def numDomtblout(domtblout, numHits, evalueT, bitT, sort): if sort is True: for hit in numDomtblout_sort(domtblout, numHits, evalueT, bitT): yield hit return header = [' 'query name', 'query accession', 'qlen', 'full E-value', 'full score', 'full bias', ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:compare_clades; 3, parameters; 3, 4; 4, identifier:pw; 5, block; 5, 6; 5, 20; 6, expression_statement; 6, 7; 7, assignment; 7, 8; 7, 9; 8, identifier:names; 9, call; 9, 10; 9, 11; 10, identifier:sorted; 11, argument_list; 11, 12; 12, call; 12, ...
def compare_clades(pw): names = sorted(set([i for i in pw])) for i in range(0, 4): wi, bt = {}, {} for a in names: for b in pw[a]: if ';' not in a or ';' not in b: continue pident = pw[a][b] cA, cB = a.split(';')[i],...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:searchAccession; 3, parameters; 3, 4; 4, identifier:acc; 5, block; 5, 6; 5, 16; 5, 91; 5, 101; 5, 176; 5, 186; 5, 261; 5, 292; 6, expression_statement; 6, 7; 7, assignment; 7, 8; 7, 11; 8, pattern_list; 8, 9; 8, 10; 9, identifier:out; 10, ident...
def searchAccession(acc): out, error = entrez('genome', acc) for line in out.splitlines(): line = line.decode('ascii').strip() if 'Assembly_Accession' in line or 'BioSample' in line: newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0] if len(newAcc) > 0:...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 11; 2, function_name:_configure_logger; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, identifier:fmt; 5, identifier:quiet; 6, identifier:level; 7, identifier:fpath; 8, identifier:pre_hooks; 9, identifier:post_hooks; 10, identifier:metric_grouping_in...
def _configure_logger(fmt, quiet, level, fpath, pre_hooks, post_hooks, metric_grouping_interval): level = getattr(logging, level.upper()) global _GLOBAL_LOG_CONFIGURED if _GLOBAL_LOG_CONFIGURED: return def wrap_hook(fn): @wraps(fn) def processor(logger, method_name, event_dic...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:combine_modifiers; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, identifier:graphemes; 6, block; 6, 7; 6, 11; 6, 15; 6, 22; 6, 203; 6, 213; 6, 217; 6, 221; 6, 278; 7, expression_statement; 7, 8; 8, assignment; 8, 9; 8, 10; 9, identifier:res...
def combine_modifiers(self, graphemes): result = [] temp = "" count = len(graphemes) for grapheme in reversed(graphemes): count -= 1 if len(grapheme) == 1 and unicodedata.category(grapheme) == "Lm" \ and not ord(grapheme) in [712, 716]: ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:check_mismatches; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, identifier:read; 5, identifier:pair; 6, identifier:mismatches; 7, identifier:mm_option; 8, identifier:req_map; 9, block; 9, 10; 9, 43; 9, 50; 9, 57; 9, 68; 9, 75; 9, 91; 9, 117; ...
def check_mismatches(read, pair, mismatches, mm_option, req_map): if pair is False: mm = count_mismatches(read) if mm is False: return False if mismatches is False: return True if mm <= mismatches: return True r_mm = count_mismatches(read) ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 4; 2, function_name:get_steam; 3, parameters; 4, block; 4, 5; 4, 25; 4, 33; 4, 47; 4, 61; 4, 84; 5, expression_statement; 5, 6; 6, assignment; 6, 7; 6, 8; 7, identifier:helper; 8, lambda; 8, 9; 8, 11; 9, lambda_parameters; 9, 10; 10, identifier:udd; 11, conditiona...
def get_steam(): helper = lambda udd: Steam(udd) if os.path.exists(udd) else None plat = platform.system() if plat == 'Darwin': return helper(paths.default_osx_userdata_path()) if plat == 'Linux': return helper(paths.default_linux_userdata_path()) if plat == 'Windows': possible_dir = winutils.find...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:generate_barcodes; 3, parameters; 3, 4; 3, 5; 4, identifier:nIds; 5, default_parameter; 5, 6; 5, 7; 6, identifier:codeLen; 7, integer:12; 8, block; 8, 9; 8, 39; 8, 54; 8, 75; 8, 87; 8, 112; 8, 116; 8, 192; 9, function_definition; 9, 10; 9, 11; ...
def generate_barcodes(nIds, codeLen=12): def next_code(b, c, i): return c[:i] + b + (c[i+1:] if i < -1 else '') def rand_base(): return random.choice(['A', 'T', 'C', 'G']) def rand_seq(n): return ''.join([rand_base() for _ in range(n)]) hpf = re.compile('aaaa|cccc|gggg|tttt', re....
0, module; 0, 1; 1, ERROR; 1, 2; 2, function_definition; 2, 3; 2, 4; 2, 8; 3, function_name:parse_fasta_annotations; 4, parameters; 4, 5; 4, 6; 4, 7; 5, identifier:fastas; 6, identifier:annot_tables; 7, identifier:trans_table; 8, block; 8, 9; 8, 57; 8, 215; 8, 235; 8, 246; 8, 260; 8, 277; 9, if_statement; 9, 10; 9, 13;...
def parse_fasta_annotations(fastas, annot_tables, trans_table): if annot_tables is not False: annots = {} for table in annot_tables: for cds in open(table): ID, start, end, strand = cds.strip().split() annots[ID] = [start, end, int(strand)] for fasta i...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:find_consensus; 3, parameters; 3, 4; 4, identifier:bases; 5, block; 5, 6; 5, 15; 5, 32; 5, 75; 5, 93; 5, 123; 5, 183; 5, 189; 5, 195; 6, expression_statement; 6, 7; 7, assignment; 7, 8; 7, 9; 8, identifier:nucs; 9, list:['A', 'T', 'G', 'C', 'N'...
def find_consensus(bases): nucs = ['A', 'T', 'G', 'C', 'N'] total = sum([bases[nuc] for nuc in nucs if nuc in bases]) try: top = max([bases[nuc] for nuc in nucs if nuc in bases]) except: bases['consensus'] = ('N', 'n/a') bases['consensus frequency'] = 'n/a' bases['referen...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:print_consensus; 3, parameters; 3, 4; 4, identifier:genomes; 5, block; 5, 6; 5, 10; 5, 128; 5, 204; 6, expression_statement; 6, 7; 7, assignment; 7, 8; 7, 9; 8, identifier:cons; 9, dictionary; 10, for_statement; 10, 11; 10, 14; 10, 22; 11, patt...
def print_consensus(genomes): cons = {} for genome, contigs in list(genomes.items()): cons[genome] = {} for contig, samples in list(contigs.items()): for sample, stats in list(samples.items()): if sample not in cons[genome]: cons[genome][sample] = ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:parse_cov; 3, parameters; 3, 4; 3, 5; 4, identifier:cov_table; 5, identifier:scaffold2genome; 6, block; 6, 7; 6, 11; 6, 15; 6, 179; 6, 195; 6, 255; 7, expression_statement; 7, 8; 8, assignment; 8, 9; 8, 10; 9, identifier:size; 10, dictionary; 1...
def parse_cov(cov_table, scaffold2genome): size = {} mapped = {} for line in open(cov_table): line = line.strip().split('\t') if line[0].startswith(' samples = line[1:] samples = [i.rsplit('/', 1)[-1].split('.', 1)[0] for i in samples] continue s...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:print_genome_matrix; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:hits; 5, identifier:fastas; 6, identifier:id2desc; 7, identifier:file_name; 8, block; 8, 9; 8, 17; 8, 24; 8, 35; 8, 85; 8, 98; 8, 106; 8, 117; 8, 189; 9, expression_state...
def print_genome_matrix(hits, fastas, id2desc, file_name): out = open(file_name, 'w') fastas = sorted(fastas) print(' print(' for fasta in fastas: line = [fasta] for other in fastas: if other == fasta: average = '-' else: averag...
0, module; 0, 1; 1, ERROR; 1, 2; 1, 298; 1, 304; 2, function_definition; 2, 3; 2, 4; 2, 17; 3, function_name:calc_thresholds; 4, parameters; 4, 5; 4, 6; 4, 7; 4, 14; 5, identifier:rbh; 6, identifier:file_name; 7, default_parameter; 7, 8; 7, 9; 8, identifier:thresholds; 9, list:[False, False, False, False]; 9, 10; 9, 11...
def calc_thresholds(rbh, file_name, thresholds = [False, False, False, False], stdevs = 2): calc_threshold = thresholds[-1] norm_threshold = {} for pair in itertools.permutations([i for i in rbh], 2): if pair[0] not in norm_threshold: norm_threshold[pair[0]] = {} norm_threshold[p...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:_update_property; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:tree_to_update; 5, identifier:xpath_root; 6, identifier:xpaths; 7, identifier:values; 8, block; 8, 9; 8, 234; 8, 241; 8, 248; 9, function_definition; 9, 10; 9, 11; 9, 17; 10...
def _update_property(tree_to_update, xpath_root, xpaths, values): def update_element(elem, idx, root, path, vals): has_root = bool(root and len(path) > len(root) and path.startswith(root)) path, attr = get_xpath_tuple(path) if attr: removed = [get_element(elem, path)] ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:validate_complex_list; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:prop; 5, identifier:value; 6, default_parameter; 6, 7; 6, 8; 7, identifier:xpath_map; 8, None; 9, block; 9, 10; 10, if_statement; 10, 11; 10, 14; 11, comparison_operator:is; ...
def validate_complex_list(prop, value, xpath_map=None): if value is not None: validate_type(prop, value, (dict, list)) if prop in _complex_definitions: complex_keys = _complex_definitions[prop] else: complex_keys = {} if xpath_map is None else xpath_map for id...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:validate_dates; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:prop; 5, identifier:value; 6, default_parameter; 6, 7; 6, 8; 7, identifier:xpath_map; 8, None; 9, block; 9, 10; 10, if_statement; 10, 11; 10, 14; 11, comparison_operator:is; 11, 12;...
def validate_dates(prop, value, xpath_map=None): if value is not None: validate_type(prop, value, dict) date_keys = set(value) if date_keys: if DATE_TYPE not in date_keys or DATE_VALUES not in date_keys: if prop in _complex_definitions: complex...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:getCharacterSet; 3, parameters; 3, 4; 4, identifier:self; 5, block; 5, 6; 5, 8; 5, 12; 5, 16; 5, 20; 5, 24; 5, 236; 6, expression_statement; 6, 7; 7, string:'''Get a character set with individual members or ranges. Current index is on '...
def getCharacterSet(self): '''Get a character set with individual members or ranges. Current index is on '[', the start of the character set. ''' chars = u'' c = None cnt = 1 start = 0 while True: escaped_slash = False c = self.next...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:getSequence; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:level; 7, integer:0; 8, block; 8, 9; 8, 11; 8, 15; 8, 19; 8, 23; 8, 27; 8, 31; 8, 344; 8, 356; 8, 371; 9, expression_statement; 9, 10; 1...
def getSequence(self, level=0): '''Get a sequence of nodes.''' seq = [] op = '' left_operand = None right_operand = None sequence_closed = False while True: c = self.next() if not c: break if c and c not in self....
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:process; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:self; 5, identifier:data; 6, identifier:type; 7, identifier:history; 8, block; 8, 9; 8, 15; 8, 23; 8, 30; 8, 38; 8, 42; 8, 129; 8, 138; 8, 147; 9, if_statement; 9, 10; 9, 13; 10, com...
def process(self, data, type, history): if type in history: return if type.enum(): return history.append(type) resolved = type.resolve() value = None if type.multi_occurrence(): value = [] else: if len(resolved) > 0:...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:_process_tz; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:self; 5, identifier:dt; 6, identifier:naive; 7, identifier:tz; 8, block; 8, 9; 8, 98; 8, 105; 8, 112; 8, 149; 8, 155; 8, 203; 9, function_definition; 9, 10; 9, 11; 9, 13; 10, fun...
def _process_tz(self, dt, naive, tz): def _tz(t): if t in (None, 'naive'): return t if t == 'local': if __debug__ and not localtz: raise ValueError("Requested conversion to local timezone, but `localtz` not installed.") t = localtz if not isinstance(t, tzinfo): if __debug__ and not local...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:__dfs; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:self; 5, identifier:v; 6, identifier:index; 7, identifier:layers; 8, block; 8, 9; 8, 58; 8, 151; 9, if_statement; 9, 10; 9, 13; 10, comparison_operator:==; 10, 11; 10, 12; 11, identifi...
def __dfs(self, v, index, layers): if index == 0: path = [v] while self._dfs_parent[v] != v: path.append(self._dfs_parent[v]) v = self._dfs_parent[v] self._dfs_paths.append(path) return True for neighbour in self._graph[v]: ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 10; 2, function_name:login; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:self; 5, identifier:username; 6, identifier:password; 7, default_parameter; 7, 8; 7, 9; 8, identifier:login_token; 9, None; 10, block; 10, 11; 10, 43; 10, 66; 10, 103; 11, if_statemen...
def login(self, username, password, login_token=None): if login_token is None: token_doc = self.post(action='query', meta='tokens', type='login') login_token = token_doc['query']['tokens']['logintoken'] login_doc = self.post( action="clientlogin", username=username, p...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:cut_levels; 3, parameters; 3, 4; 3, 5; 4, identifier:nodes; 5, identifier:start_level; 6, block; 6, 7; 6, 11; 6, 15; 6, 146; 6, 162; 7, expression_statement; 7, 8; 8, assignment; 8, 9; 8, 10; 9, identifier:final; 10, list:[]; 11, expression_sta...
def cut_levels(nodes, start_level): final = [] removed = [] for node in nodes: if not hasattr(node, 'level'): remove(node, removed) continue if node.attr.get('soft_root', False): remove(node, removed) continue if node.level == start_lev...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 7; 2, function_name:S; 3, parameters; 3, 4; 3, 5; 4, identifier:Document; 5, list_splat_pattern; 5, 6; 6, identifier:fields; 7, block; 7, 8; 7, 12; 7, 115; 8, expression_statement; 8, 9; 9, assignment; 9, 10; 9, 11; 10, identifier:result; 11, list:[]; 12, for_stat...
def S(Document, *fields): result = [] for field in fields: if isinstance(field, tuple): field, direction = field result.append((field, direction)) continue direction = ASCENDING if not field.startswith('__'): field = field.replace('__', '.') if field[0] == '-': direction = DESCENDING if field...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:valid; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, identifier:cnpj; 6, block; 6, 7; 6, 17; 6, 21; 6, 29; 6, 37; 6, 41; 6, 47; 6, 87; 6, 103; 6, 115; 6, 121; 6, 129; 6, 133; 6, 139; 6, 179; 6, 195; 6, 207; 7, if_statement; 7, 8; 7, 14; 8, ...
def valid(self, cnpj): if len(cnpj) != 14: return False tam = 12 nums = cnpj[:tam] digs = cnpj[tam:] tot = 0 pos = tam-7 for i in range(tam, 0, -1): tot = tot + int(nums[tam-i])*pos pos = pos - 1 if pos < 2: ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:arrayuniqify; 3, parameters; 3, 4; 3, 5; 4, identifier:X; 5, default_parameter; 5, 6; 5, 7; 6, identifier:retainorder; 7, False; 8, block; 8, 9; 8, 17; 8, 23; 8, 45; 9, expression_statement; 9, 10; 10, assignment; 10, 11; 10, 12; 11, identifier...
def arrayuniqify(X, retainorder=False): s = X.argsort() X = X[s] D = np.append([True],X[1:] != X[:-1]) if retainorder: DD = np.append(D.nonzero()[0],len(X)) ind = [min(s[x:DD[i+1]]) for (i,x) in enumerate(DD[:-1])] ind.sort() return ind else: return [D,s]
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:equalspairs; 3, parameters; 3, 4; 3, 5; 4, identifier:X; 5, identifier:Y; 6, block; 6, 7; 6, 15; 6, 37; 6, 58; 6, 75; 6, 84; 6, 100; 6, 116; 6, 127; 6, 143; 7, expression_statement; 7, 8; 8, assignment; 8, 9; 8, 10; 9, identifier:T; 10, call; 1...
def equalspairs(X, Y): T = Y.copy() R = (T[1:] != T[:-1]).nonzero()[0] R = np.append(R,np.array([len(T)-1])) M = R[R.searchsorted(range(len(T)))] D = T.searchsorted(X) T = np.append(T,np.array([0])) M = np.append(M,np.array([0])) A = (T[D] == X) * D B = (T[D] == X) * (M[D] + 1) r...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 22; 2, function_name:loadSV; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 3, 17; 3, 20; 4, identifier:fname; 5, default_parameter; 5, 6; 5, 7; 6, identifier:shape; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:titles; 10, None; 11, default_parameter;...
def loadSV(fname, shape=None, titles=None, aligned=False, byteorder=None, renamer=None, **kwargs): [columns, metadata] = loadSVcols(fname, **kwargs) if 'names' in metadata.keys(): names = metadata['names'] else: names = None if 'formats' in metadata.keys(): formats =...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 22; 2, function_name:loadSVrecs; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 3, 17; 3, 20; 4, identifier:fname; 5, default_parameter; 5, 6; 5, 7; 6, identifier:uselines; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:skiprows; 10, integer:0; 11, defa...
def loadSVrecs(fname, uselines=None, skiprows=0, linefixer=None, delimiter_regex=None, verbosity=DEFAULT_VERBOSITY, **metadata): if delimiter_regex and isinstance(delimiter_regex, types.StringType): import re delimiter_regex = re.compile(delimiter_regex) [metadata, inferedlines,...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 1, 13; 2, function_name:dflt_interval; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, typed_parameter; 5, 6; 5, 7; 6, identifier:cd_id; 7, type; 7, 8; 8, identifier:str; 9, type; 9, 10; 10, tuple; 10, 11; 10, 12; 11, identifier:int; 12, identifier:int; 13, b...
def dflt_interval(self, cd_id: str) -> (int, int): LOGGER.debug('RevocationCache.dflt_interval >>>') fro = None to = None for rr_id in self: if cd_id != rev_reg_id2cred_def_id(rr_id): continue entry = self[rr_id] if entry.rr_delta_frame...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 13; 1, 15; 2, function_name:parse; 3, parameters; 3, 4; 3, 8; 4, typed_parameter; 4, 5; 4, 6; 5, identifier:base_dir; 6, type; 6, 7; 7, identifier:str; 8, typed_default_parameter; 8, 9; 8, 10; 8, 12; 9, identifier:timestamp; 10, type; 10, 11; 11, identifier:int; 1...
def parse(base_dir: str, timestamp: int = None) -> int: LOGGER.debug('parse >>> base_dir: %s, timestamp: %s', base_dir, timestamp) if not isdir(base_dir): LOGGER.info('No cache archives available: not feeding cache') LOGGER.debug('parse <<< None') return None ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:_convert_hbf_meta_val_for_xml; 3, parameters; 3, 4; 3, 5; 4, identifier:key; 5, identifier:val; 6, block; 6, 7; 6, 24; 6, 28; 6, 32; 6, 89; 6, 153; 7, if_statement; 7, 8; 7, 13; 8, call; 8, 9; 8, 10; 9, identifier:isinstance; 10, argument_list;...
def _convert_hbf_meta_val_for_xml(key, val): if isinstance(val, list): return [_convert_hbf_meta_val_for_xml(key, i) for i in val] is_literal = True content = None if isinstance(val, dict): ret = val if '@href' in val: is_literal = False else: cont...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:validate_params_match; 3, parameters; 3, 4; 3, 5; 4, identifier:method; 5, identifier:parameters; 6, block; 6, 7; 6, 16; 6, 32; 7, expression_statement; 7, 8; 8, assignment; 8, 9; 8, 10; 9, identifier:argspec; 10, call; 10, 11; 10, 14; 11, attr...
def validate_params_match(method, parameters): argspec = inspect.getargspec(method) default_length = len(argspec.defaults) if argspec.defaults is not None else 0 if isinstance(parameters, list): if len(parameters) > len(argspec.args) and argspec.varargs is None: raise InvalidParamsError(...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:addcols; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:X; 5, identifier:cols; 6, default_parameter; 6, 7; 6, 8; 7, identifier:names; 8, None; 9, block; 9, 10; 9, 34; 9, 188; 9, 208; 9, 238; 10, if_statement; 10, 11; 10, 16; 11, call; 11, 12; 1...
def addcols(X, cols, names=None): if isinstance(names,str): names = [n.strip() for n in names.split(',')] if isinstance(cols, list): if any([isinstance(x,np.ndarray) or isinstance(x,list) or \ isinstance(x,tuple) for x in cols]): assert all([len(x...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 16; 2, function_name:replace; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 10; 3, 13; 4, identifier:X; 5, identifier:old; 6, identifier:new; 7, default_parameter; 7, 8; 7, 9; 8, identifier:strict; 9, True; 10, default_parameter; 10, 11; 10, 12; 11, identifier:cols; 1...
def replace(X, old, new, strict=True, cols=None, rows=None): if cols == None: cols = X.dtype.names elif isinstance(cols, str): cols = cols.split(',') if rows == None: rows = np.ones((len(X),), bool) if strict: new = np.array(new) for a in cols: if X.dt...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 11; 2, function_name:rowstack; 3, parameters; 3, 4; 3, 5; 3, 8; 4, identifier:seq; 5, default_parameter; 5, 6; 5, 7; 6, identifier:mode; 7, string:'nulls'; 8, default_parameter; 8, 9; 8, 10; 9, identifier:nullvals; 10, None; 11, block; 11, 12; 11, 14; 11, 25; 12, ...
def rowstack(seq, mode='nulls', nullvals=None): ''' Vertically stack a sequence of numpy ndarrays with structured dtype Analog of numpy.vstack Implemented by the tabarray method :func:`tabular.tab.tabarray.rowstack` which uses :func:`tabular.tabarray.tab_rowstack`. **Parameters** ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 11; 2, function_name:colstack; 3, parameters; 3, 4; 3, 5; 3, 8; 4, identifier:seq; 5, default_parameter; 5, 6; 5, 7; 6, identifier:mode; 7, string:'abort'; 8, default_parameter; 8, 9; 8, 10; 9, identifier:returnnaming; 10, False; 11, block; 11, 12; 11, 21; 11, 46;...
def colstack(seq, mode='abort',returnnaming=False): assert mode in ['first','drop','abort','rename'], \ 'mode argument must take on value "first","drop", "rename", or "abort".' AllNames = utils.uniqify(utils.listunion( [list(l.dtype.names) for l in seq])) Na...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 20; 2, function_name:getjp2image; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 3, 17; 4, identifier:date; 5, default_parameter; 5, 6; 5, 7; 6, identifier:sourceId; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:observatory; 10, None; 11, default_param...
def getjp2image(date, sourceId=None, observatory=None, instrument=None, detector=None, measurement=None): ''' Helioviewer.org and JHelioviewer operate off of JPEG2000 formatted image data generated from science-quality FITS files. U...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 20; 1, 26; 2, function_name:loads_loader; 3, parameters; 3, 4; 3, 10; 4, typed_parameter; 4, 5; 4, 6; 5, identifier:load_module; 6, type; 6, 7; 7, attribute; 7, 8; 7, 9; 8, identifier:types; 9, identifier:ModuleType; 10, typed_parameter; 10, 11; 10, 12; 11, identi...
def loads_loader(load_module: types.ModuleType, pairs: Dict[str, str]) -> Optional[JSGValidateable]: cntxt = load_module._CONTEXT possible_type = pairs[cntxt.TYPE] if cntxt.TYPE in pairs else None target_class = getattr(load_module, possible_type, None) if isinstance(possible_type, str) else None if tar...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 1, 11; 2, function_name:get_cred_def; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, typed_parameter; 5, 6; 5, 7; 6, identifier:cd_id; 7, type; 7, 8; 8, identifier:str; 9, type; 9, 10; 10, identifier:str; 11, block; 11, 12; 11, 20; 11, 29; 11, 191; 11, 199; ...
async def get_cred_def(self, cd_id: str) -> str: LOGGER.debug('_BaseAgent.get_cred_def >>> cd_id: %s', cd_id) rv_json = json.dumps({}) with CRED_DEF_CACHE.lock: if cd_id in CRED_DEF_CACHE: LOGGER.info('_BaseAgent.get_cred_def: got cred def for %s from cache', cd_id) ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 18; 2, function_name:convert_nexson_format; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 9; 3, 12; 3, 15; 4, identifier:blob; 5, identifier:out_nexson_format; 6, default_parameter; 6, 7; 6, 8; 7, identifier:current_format; 8, None; 9, default_parameter; 9, 10; 9, 11; 10, i...
def convert_nexson_format(blob, out_nexson_format, current_format=None, remove_old_structs=True, pristine_if_invalid=False, sort_arbitrary=False): if not current_format: current_...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:_inplace_sort_by_id; 3, parameters; 3, 4; 4, identifier:unsorted_list; 5, block; 5, 6; 5, 15; 5, 30; 5, 36; 5, 41; 6, if_statement; 6, 7; 6, 13; 7, not_operator; 7, 8; 8, call; 8, 9; 8, 10; 9, identifier:isinstance; 10, argument_list; 10, 11; 1...
def _inplace_sort_by_id(unsorted_list): if not isinstance(unsorted_list, list): return sorted_list = [(i.get('@id'), i) for i in unsorted_list] sorted_list.sort() del unsorted_list[:] unsorted_list.extend([i[1] for i in sorted_list])
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:cull_nonmatching_trees; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:nexson; 5, identifier:tree_id; 6, default_parameter; 6, 7; 6, 8; 7, identifier:curr_version; 8, None; 9, block; 9, 10; 9, 22; 9, 37; 9, 44; 9, 50; 9, 54; 9, 117; 9, 134; 10,...
def cull_nonmatching_trees(nexson, tree_id, curr_version=None): if curr_version is None: curr_version = detect_nexson_version(nexson) if not _is_by_id_hbf(curr_version): nexson = convert_nexson_format(nexson, BY_ID_HONEY_BADGERFISH) nexml_el = get_nexml_el(nexson) tree_groups = nexml_el[...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 18; 1, 30; 2, function_name:_validate; 3, parameters; 3, 4; 3, 5; 3, 9; 4, identifier:self; 5, typed_parameter; 5, 6; 5, 7; 6, identifier:val; 7, type; 7, 8; 8, identifier:list; 9, typed_default_parameter; 9, 10; 9, 11; 9, 17; 10, identifier:log; 11, type; 11, 12;...
def _validate(self, val: list, log: Optional[Logger] = None) -> Tuple[bool, List[str]]: errors = [] if not isinstance(val, list): errors.append(f"{self._variable_name}: {repr(val)} is not an array") else: for i in range(0, len(val)): v = val[i] ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 14; 1, 16; 2, function_name:_sync_revoc; 3, parameters; 3, 4; 3, 5; 3, 9; 4, identifier:self; 5, typed_parameter; 5, 6; 5, 7; 6, identifier:rr_id; 7, type; 7, 8; 8, identifier:str; 9, typed_default_parameter; 9, 10; 9, 11; 9, 13; 10, identifier:rr_size; 11, type; ...
async def _sync_revoc(self, rr_id: str, rr_size: int = None) -> None: LOGGER.debug('Issuer._sync_revoc >>> rr_id: %s, rr_size: %s', rr_id, rr_size) (cd_id, tag) = rev_reg_id2cred_def_id__tag(rr_id) try: await self.get_cred_def(cd_id) except AbsentCredDef: LOGGER.d...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 12; 2, function_name:add_namespace_uri; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 9; 4, identifier:self; 5, identifier:ns_uri; 6, default_parameter; 6, 7; 6, 8; 7, identifier:prefix; 8, None; 9, default_parameter; 9, 10; 9, 11; 10, identifier:schema_location; 11, None; ...
def add_namespace_uri(self, ns_uri, prefix=None, schema_location=None): assert ns_uri if ns_uri in self.__ns_uri_map: ni = self.__lookup_uri(ns_uri) new_ni = copy.deepcopy(ni) if prefix: self.__check_prefix_conflict(ni, prefix) new_ni.p...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 14; 2, function_name:get_schemaloc_string; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:ns_uris; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:sort; 10, False; 11, default_parameter; 11, 12...
def get_schemaloc_string(self, ns_uris=None, sort=False, delim="\n"): if not ns_uris: ns_uris = six.iterkeys(self.__ns_uri_map) if sort: ns_uris = sorted(ns_uris) schemalocs = [] for ns_uri in ns_uris: ni = self.__lookup_uri(ns_uri) if ni.s...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 20; 2, function_name:tab_join; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 3, 17; 4, identifier:ToMerge; 5, default_parameter; 5, 6; 5, 7; 6, identifier:keycols; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:nullvals; 10, None; 11, default_parameter...
def tab_join(ToMerge, keycols=None, nullvals=None, renamer=None, returnrenaming=False, Names=None): ''' Database-join for tabular arrays. Wrapper for :func:`tabular.spreadsheet.join` that deals with the coloring and returns the result as a tabarray. Method calls:: data = t...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 26; 2, function_name:aggregate; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 3, 17; 3, 20; 3, 23; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:On; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:AggFuncDict; 10, None; 11, default...
def aggregate(self, On=None, AggFuncDict=None, AggFunc=None, AggList = None, returnsort=False,KeepOthers=True, keyfuncdict=None): if returnsort: [data, s] = spreadsheet.aggregate(X=self, On=On, AggFuncDict=AggFuncDict, ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 15; 2, function_name:argsort; 3, parameters; 3, 4; 3, 5; 3, 9; 3, 12; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:axis; 7, unary_operator:-; 7, 8; 8, integer:1; 9, default_parameter; 9, 10; 9, 11; 10, identifier:kind; 11, string:'quicksort'...
def argsort(self, axis=-1, kind='quicksort', order=None):
    """Return the indices that would sort the array, as a base ndarray.

    Parameters mirror ``numpy.ndarray.argsort``: *axis* to sort along,
    *kind* selecting the algorithm, and *order* for structured arrays.
    The result is always viewed as a plain ``numpy.ndarray`` so any
    subclass wrapping is stripped.

    NOTE(review): presumably ``self`` is an ndarray subclass — confirm
    against the enclosing class definition.
    """
    # The original called the private np.core.fromnumeric._wrapit,
    # which is gone in NumPy 2.x (np.core -> np._core). Since the
    # result is immediately re-viewed as a base ndarray, converting to
    # a base array first is equivalent and uses only public API.
    index_array = np.asarray(self).argsort(axis=axis, kind=kind, order=order)
    return index_array.view(np.ndarray)
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:_finalize_namespaces; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:ns_dict; 7, None; 8, block; 8, 9; 8, 33; 8, 55; 8, 61; 8, 84; 8, 95; 9, if_statement; 9, 10; 9, 11; 10, identifier:ns_dict; 11,...
def _finalize_namespaces(self, ns_dict=None): if ns_dict: for ns, alias in six.iteritems(ns_dict): self._collected_namespaces.add_namespace_uri(ns, alias) self._collected_namespaces.add_namespace_uri( ns_uri=idgen.get_id_namespace(), prefix=idgen.get_i...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 7; 2, function_name:update_empty_fields; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, dictionary_splat_pattern; 5, 6; 6, identifier:kwargs; 7, block; 7, 8; 7, 26; 7, 44; 7, 62; 7, 93; 7, 111; 7, 127; 7, 144; 7, 162; 8, if_statement; 8, 9; 8, 14; 9, comparison...
def update_empty_fields(self, **kwargs): if self._is_deprecated is None: self._is_deprecated = kwargs.get('is_deprecated') if self._is_dubious is None: self._is_dubious = kwargs.get('is_dubious') if self._is_synonym is None: self._is_synonym = kwargs.get('is_s...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 19; 1, 25; 2, function_name:get_creds; 3, parameters; 3, 4; 3, 5; 3, 9; 3, 14; 4, identifier:self; 5, typed_parameter; 5, 6; 5, 7; 6, identifier:proof_req_json; 7, type; 7, 8; 8, identifier:str; 9, typed_default_parameter; 9, 10; 9, 11; 9, 13; 10, identifier:filt;...
async def get_creds(self, proof_req_json: str, filt: dict = None, filt_dflt_incl: bool = False) -> (Set[str], str): LOGGER.debug('HolderProver.get_creds >>> proof_req_json: %s, filt: %s', proof_req_json, filt) if filt is None: filt = {} rv = None creds_json = await anoncreds....
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 7; 2, function_name:summary; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:processors; 5, identifier:metrics; 6, identifier:context; 7, block; 7, 8; 7, 42; 7, 76; 7, 114; 7, 118; 7, 199; 7, 206; 7, 243; 7, 248; 7, 255; 7, 264; 7, 296; 7, 305; 8, function_definiti...
def summary(processors, metrics, context): def display_header(processors, before='', after=''): print(before, end=' ') for processor in processors: processor.display_header() print(after) def display_separator(processors, before='', after=''): print(before, end=' ') ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 13; 2, function_name:run_experiment; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 10; 4, identifier:experiment; 5, identifier:roleouts; 6, identifier:episodes; 7, default_parameter; 7, 8; 7, 9; 8, identifier:in_cloud; 9, False; 10, default_parameter; 10, 11; 10, 12; ...
def run_experiment(experiment, roleouts, episodes, in_cloud=False, dynProfile=None): def run(): if dynProfile is None: maxsteps = len(experiment.profile) else: maxsteps = dynProfile.shape[1] na = len(experiment.agents) ni = roleouts * episod...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 14; 2, function_name:total_cost; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:p; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:p_cost; 10, None; 11, default_parameter; 11, 12; 11, 13; 12, i...
def total_cost(self, p=None, p_cost=None, pcost_model=None): p = self.p if p is None else p p_cost = self.p_cost if p_cost is None else p_cost pcost_model = self.pcost_model if pcost_model is None else pcost_model p = 0.0 if not self.online else p if pcost_model == PW_LINEAR: ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:offers_to_pwl; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, identifier:offers; 6, block; 6, 7; 6, 12; 6, 26; 6, 44; 6, 57; 6, 70; 6, 82; 6, 159; 6, 193; 6, 223; 7, assert_statement; 7, 8; 8, not_operator; 8, 9; 9, attribute; 9, 10; 9, 11; ...
def offers_to_pwl(self, offers): assert not self.is_load g_offers = [offer for offer in offers if offer.generator == self] gt_zero = [offr for offr in g_offers if round(offr.quantity, 4) > 0.0] valid = [offer for offer in gt_zero if not offer.withheld] p_offers = [v for v in vali...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:bids_to_pwl; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, identifier:bids; 6, block; 6, 7; 6, 11; 6, 25; 6, 43; 6, 56; 6, 69; 6, 81; 6, 149; 6, 203; 7, assert_statement; 7, 8; 8, attribute; 8, 9; 8, 10; 9, identifier:self; 10, identifier:i...
def bids_to_pwl(self, bids): assert self.is_load vl_bids = [bid for bid in bids if bid.vLoad == self] gt_zero = [bid for bid in vl_bids if round(bid.quantity, 4) > 0.0] valid_bids = [bid for bid in gt_zero if not bid.withheld] p_bids = [v for v in valid_bids if not v.reactive] ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:DoxyfileParse; 3, parameters; 3, 4; 4, identifier:file_contents; 5, block; 5, 6; 5, 10; 5, 13; 5, 27; 5, 33; 5, 47; 5, 53; 5, 59; 5, 67; 5, 71; 5, 75; 5, 79; 5, 83; 5, 87; 5, 127; 5, 274; 5, 325; 6, expression_statement; 6, 7; 7, assignment; 7,...
def DoxyfileParse(file_contents): data = {} import shlex lex = shlex.shlex(instream = file_contents, posix = True) lex.wordchars += "*+./-:" lex.whitespace = lex.whitespace.replace("\n", "") lex.escape = "" lineno = lex.lineno token = lex.get_token() key = token last_token = "" key_toke...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 7; 2, function_name:DoxySourceScan; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:node; 5, identifier:env; 6, identifier:path; 7, block; 7, 8; 7, 37; 7, 42; 7, 46; 7, 57; 7, 78; 7, 88; 7, 98; 7, 112; 7, 282; 7, 333; 7, 393; 7, 398; 7, 403; 7, 408; 7, 424; 8, expr...
def DoxySourceScan(node, env, path): default_file_patterns = [ '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx', '*.ipp', '*.i++', '*.inl', '*.h', '*.hh ', '*.hxx', '*.hpp', '*.h++', '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm', '*.py', ] defaul...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:_quadratic_costs; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, identifier:self; 5, identifier:generators; 6, identifier:ipol; 7, identifier:nxyz; 8, identifier:base_mva; 9, block; 9, 10; 9, 17; 9, 24; 9, 38; 9, 61; 9, 89; 9, 117; 9, 126; 9, ...
def _quadratic_costs(self, generators, ipol, nxyz, base_mva): npol = len(ipol) rnpol = range(npol) gpol = [g for g in generators if g.pcost_model == POLYNOMIAL] if [g for g in gpol if len(g.p_cost) > 3]: logger.error("Order of polynomial cost greater than quadratic.") ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:_gh; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, identifier:x; 6, block; 6, 7; 6, 26; 6, 45; 6, 80; 6, 95; 6, 114; 6, 133; 6, 144; 6, 159; 6, 170; 6, 192; 6, 212; 6, 384; 7, expression_statement; 7, 8; 8, assignment; 8, 9; 8, 10; 9, ident...
def _gh(self, x): Pgen = x[self._Pg.i1:self._Pg.iN + 1] Qgen = x[self._Qg.i1:self._Qg.iN + 1] for i, gen in enumerate(self._gn): gen.p = Pgen[i] * self._base_mva gen.q = Qgen[i] * self._base_mva Sbus = self.om.case.getSbus(self._bs) Vang = x[self._Va.i1:se...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:performAction; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, identifier:action; 6, block; 6, 7; 6, 27; 6, 37; 6, 49; 6, 66; 6, 80; 6, 102; 6, 163; 6, 169; 7, expression_statement; 7, 8; 8, assignment; 8, 9; 8, 10; 9, identifier:gs; 10, list...
def performAction(self, action): gs = [g for g in self.case.online_generators if g.bus.type !=REFERENCE] assert len(action) == len(gs) logger.info("Action: %s" % list(action)) for i, g in enumerate(gs): g.p = action[i] NewtonPF(self.case, verbose=False).solve() ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 20; 1, 28; 2, function_name:blt; 3, parameters; 3, 4; 3, 12; 4, typed_parameter; 4, 5; 4, 6; 5, identifier:f; 6, type; 6, 7; 7, generic_type; 7, 8; 7, 9; 8, identifier:List; 9, type_parameter; 9, 10; 10, type; 10, 11; 11, identifier:SYM; 12, typed_parameter; 12, 1...
def blt(f: List[SYM], x: List[SYM]) -> Dict[str, Any]: J = ca.jacobian(f, x) nblock, rowperm, colperm, rowblock, colblock, coarserow, coarsecol = J.sparsity().btf() return { 'J': J, 'nblock': nblock, 'rowperm': rowperm, 'colperm': colperm, 'rowblock': rowblock, ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:sort_generators; 3, parameters; 3, 4; 4, identifier:self; 5, block; 5, 6; 6, expression_statement; 6, 7; 7, call; 7, 8; 7, 13; 8, attribute; 8, 9; 8, 12; 9, attribute; 9, 10; 9, 11; 10, identifier:self; 11, identifier:generators; 12, identifier...
def sort_generators(self):
    """Reorder ``self.generators`` in place by each generator's bus index."""
    def bus_index(gen):
        return gen.bus._i
    self.generators.sort(key=bus_index)
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 12; 2, function_name:create; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 9; 4, identifier:self; 5, identifier:dotdata; 6, default_parameter; 6, 7; 6, 8; 7, identifier:prog; 8, string:"dot"; 9, default_parameter; 9, 10; 9, 11; 10, identifier:format; 11, string:"xdot"; 12, ...
def create(self, dotdata, prog="dot", format="xdot"): import os, tempfile from dot2tex.dotparsing import find_graphviz progs = find_graphviz() if progs is None: logger.warning("GraphViz executables not found.") return None if not progs.has_key(prog): ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 6; 2, function_name:format; 3, parameters; 3, 4; 3, 5; 4, identifier:file_metrics; 5, identifier:build_metrics; 6, block; 6, 7; 6, 132; 6, 141; 6, 150; 6, 157; 6, 226; 6, 245; 6, 250; 6, 276; 7, function_definition; 7, 8; 7, 9; 7, 14; 8, function_name:indent; 9, p...
def format(file_metrics, build_metrics): def indent(elem, level=0): i = "\n" + level*" " if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for ele...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:governor; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:self; 5, identifier:Xgov; 6, identifier:Pgov; 7, identifier:Vgov; 8, block; 8, 9; 8, 15; 8, 25; 8, 34; 8, 52; 8, 70; 8, 77; 8, 84; 8, 91; 8, 98; 8, 105; 8, 112; 8, 119; 8, 126; 8, 1...
def governor(self, Xgov, Pgov, Vgov): governors = self.governors omegas = 2 * pi * self.freq F = zeros(Xgov.shape) typ1 = [g.generator._i for g in governors if g.model == CONST_POWER] typ2 = [g.generator._i for g in governors if g.model == GENERAL_IEEE] F[typ1, 0] = 0 ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:generator; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, identifier:self; 5, identifier:Xgen; 6, identifier:Xexc; 7, identifier:Xgov; 8, identifier:Vgen; 9, block; 9, 10; 9, 16; 9, 26; 9, 35; 9, 51; 9, 67; 9, 74; 9, 81; 9, 96; 9, 111; 9, 118;...
def generator(self, Xgen, Xexc, Xgov, Vgen): generators = self.dyn_generators omegas = 2 * pi * self.freq F = zeros(Xgen.shape) typ1 = [g._i for g in generators if g.model == CLASSICAL] typ2 = [g._i for g in generators if g.model == FOURTH_ORDER] omega = Xgen[typ1, 1] ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 7; 2, function_name:_const_pf_constraints; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:self; 5, identifier:gn; 6, identifier:base_mva; 7, block; 7, 8; 7, 41; 7, 51; 7, 58; 7, 65; 7, 80; 7, 95; 7, 110; 7, 125; 7, 140; 7, 166; 7, 182; 7, 209; 7, 325; 8, expressio...
def _const_pf_constraints(self, gn, base_mva): ivl = array([i for i, g in enumerate(gn) if g.is_load and (g.q_min != 0.0 or g.q_max != 0.0)]) vl = [gn[i] for i in ivl] nvl = len(vl) ng = len(gn) Pg = array([g.p for g in vl]) / base_mva Qg = array([g.q...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 7; 2, function_name:_voltage_angle_diff_limit; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:self; 5, identifier:buses; 6, identifier:branches; 7, block; 7, 8; 7, 15; 7, 300; 8, expression_statement; 8, 9; 9, assignment; 9, 10; 9, 11; 10, identifier:nb; 11, call;...
def _voltage_angle_diff_limit(self, buses, branches): nb = len(buses) if not self.ignore_ang_lim: iang = [i for i, b in enumerate(branches) if (b.ang_min and (b.ang_min > -360.0)) or (b.ang_max and (b.ang_max < 360.0))] iangl = array([i for...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:_clipPrices; 3, parameters; 3, 4; 4, identifier:self; 5, block; 5, 6; 5, 38; 5, 70; 5, 107; 5, 144; 6, if_statement; 6, 7; 6, 10; 7, attribute; 7, 8; 7, 9; 8, identifier:self; 9, identifier:guaranteeOfferPrice; 10, block; 10, 11; 11, for_statem...
def _clipPrices(self): if self.guaranteeOfferPrice: for offer in self.offers: if offer.accepted and offer.clearedPrice < offer.price: offer.clearedPrice = offer.price if self.guaranteeBidPrice: for bid in self.bids: if bid.accep...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:gpu_iuwt_decomposition; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, identifier:in1; 5, identifier:scale_count; 6, identifier:scale_adjust; 7, identifier:store_smoothed; 8, identifier:store_on_gpu; 9, block; 9, 10; 9, 16; 9, 40; 9, 49; 9, 74...
def gpu_iuwt_decomposition(in1, scale_count, scale_adjust, store_smoothed, store_on_gpu): ker = SourceModule( ) wavelet_filter = (1./16)*np.array([1,4,6,4,1], dtype=np.float32) wavelet_filter = gpuarray.to_gpu_async(wavelet_filter) detail_coeffs = gpuarray.empty([scale_count-scale_adjust, in1.shape[0], ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:gpu_iuwt_recomposition; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 4, identifier:in1; 5, identifier:scale_adjust; 6, identifier:store_on_gpu; 7, identifier:smoothed_array; 8, block; 8, 9; 8, 33; 8, 42; 8, 52; 8, 97; 8, 121; 8, 130; 8, 143; 8, 149; ...
def gpu_iuwt_recomposition(in1, scale_adjust, store_on_gpu, smoothed_array): wavelet_filter = (1./16)*np.array([1,4,6,4,1], dtype=np.float32) wavelet_filter = gpuarray.to_gpu_async(wavelet_filter) max_scale = in1.shape[0] + scale_adjust if smoothed_array is None: recomposition = gpuarray.zeros([...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:from_config; 3, parameters; 3, 4; 4, identifier:config; 5, block; 5, 6; 5, 10; 5, 18; 5, 172; 6, expression_statement; 6, 7; 7, assignment; 7, 8; 7, 9; 8, identifier:matrix; 9, dictionary; 10, expression_statement; 10, 11; 11, assignment; 11, 1...
def from_config(config): matrix = {} variables = config.keys() for entries in product(*config.values()): combination = dict(zip(variables, entries)) include = True for value in combination.values(): for reducer in value.reducers: if reducer.pattern == '-':...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 71; 2, function_name:moresane_by_scale; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 3, 17; 3, 20; 3, 23; 3, 26; 3, 29; 3, 32; 3, 35; 3, 38; 3, 41; 3, 44; 3, 47; 3, 50; 3, 53; 3, 56; 3, 59; 3, 62; 3, 65; 3, 68; 4, identifier:self; 5, default_parameter; 5, 6; 5, ...
def moresane_by_scale(self, start_scale=1, stop_scale=20, subregion=None, sigma_level=4, loop_gain=0.1, tolerance=0.75, accuracy=1e-6, major_loop_miter=100, minor_loop_miter=30, all_on_gpu=False, decom_mode="ser", core_count=1, conv_device='cpu', conv_mode='linear', e...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 16; 2, function_name:load_or_create; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 4, identifier:cls; 5, default_parameter; 5, 6; 5, 7; 6, identifier:filename; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:no_input; 10, False; 11, default_parameter; 1...
def load_or_create(cls, filename=None, no_input=False, create_new=False, **kwargs): parser = argparse.ArgumentParser() parser.add_argument('--no_input', action='store_true') parser.add_argument('--create_new', action='store_true') args = parser.parse_args() if args.no_input: ...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:cleanup; 3, parameters; 3, 4; 4, identifier:self; 5, block; 5, 6; 5, 12; 5, 24; 5, 45; 5, 54; 5, 66; 5, 75; 5, 97; 5, 106; 5, 123; 5, 132; 5, 154; 5, 163; 5, 190; 6, expression_statement; 6, 7; 7, assignment; 7, 8; 7, 11; 8, attribute; 8, 9; 8,...
def cleanup(self): self.pre_exit_trigger = True self.logger.info("Shutting down %s, please wait a moment.", self.name) for t in threading.enumerate(): if isinstance(t, TimerClass): t.cancel() self.logger.debug('Timers cancelled') for i in self.objects:...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 8; 2, function_name:write_puml; 3, parameters; 3, 4; 3, 5; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:filename; 7, string:''; 8, block; 8, 9; 8, 42; 8, 63; 8, 70; 8, 77; 8, 103; 8, 110; 8, 333; 8, 340; 9, function_definition; 9, 10; 9, 11;...
def write_puml(self, filename=''): def get_type(o): type = 'program' if isinstance(o, AbstractSensor): type = 'sensor' elif isinstance(o, AbstractActuator): type = 'actuator' return type if filename: s = open(fil...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:fit; 3, parameters; 3, 4; 3, 5; 3, 6; 4, identifier:self; 5, identifier:X; 6, default_parameter; 6, 7; 6, 8; 7, identifier:y; 8, None; 9, block; 9, 10; 9, 18; 9, 32; 9, 44; 9, 52; 9, 63; 9, 89; 9, 106; 9, 118; 9, 170; 9, 187; 9, 191; 9, 203; 9,...
def fit(self, X, y=None): N = X.shape[0] if y is None: y = np.zeros(N) self.classes = list(set(y)) self.classes.sort() self.n_classes = len(self.classes) if not self.sigma: self.sigma = median_kneighbour_distance(X) self.gamma = self.si...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 11; 2, function_name:predict_sequence; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, identifier:self; 5, identifier:X; 6, identifier:A; 7, identifier:pi; 8, default_parameter; 8, 9; 8, 10; 9, identifier:inference; 10, string:'smoothing'; 11, block; 11, 12; 11, 2...
def predict_sequence(self, X, A, pi, inference='smoothing'): obsll = self.predict_proba(X) T, S = obsll.shape alpha = np.zeros((T, S)) alpha[0, :] = pi for t in range(1, T): alpha[t, :] = np.dot(alpha[t-1, :], A) for s in range(S): alpha[t,...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 15; 2, function_name:add_bgcolor; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 9; 3, 12; 4, identifier:self; 5, identifier:colname; 6, default_parameter; 6, 7; 6, 8; 7, identifier:cmap; 8, string:'copper'; 9, default_parameter; 9, 10; 9, 11; 10, identifier:mode; 11, string...
def add_bgcolor(self, colname, cmap='copper', mode='absmax', threshold=2): try: cmap = cmap_builder(cmap) except: pass data = self.df[colname].values if len(data) == 0: return if mode == 'clip': data = [min(x, threshold)...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 20; 2, function_name:list; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 3, 17; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:filter; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:type; 10, None; 11, default_parameter; 11, 12; 11...
def list(self, filter=None, type=None, sort=None, limit=None, page=None):
    """Retrieve one page of resources from the backing service.

    All arguments are optional query refinements forwarded unchanged
    to the service listing call. Returns a ``Page`` of records decoded
    with ``self.LIST_SCHEMA`` together with their pagination links.
    """
    response = self.service.list(self.base, filter, type, sort, limit, page)
    records, links = self.service.decode(
        self.LIST_SCHEMA, response, many=True, links=True)
    return Page(records, links)
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 9; 2, function_name:signup_handler; 3, parameters; 3, 4; 3, 5; 3, 7; 4, identifier:remote; 5, list_splat_pattern; 5, 6; 6, identifier:args; 7, dictionary_splat_pattern; 7, 8; 8, identifier:kwargs; 9, block; 9, 10; 9, 20; 9, 27; 9, 36; 9, 45; 9, 70; 9, 79; 9, 302; ...
def signup_handler(remote, *args, **kwargs): if current_user.is_authenticated: return redirect('/') oauth_token = token_getter(remote) if not oauth_token: return redirect('/') session_prefix = token_session_key(remote.name) if not session.get(session_prefix + '_autoregister', False):...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 20; 2, function_name:list_csv; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 3, 17; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:filter; 7, None; 8, default_parameter; 8, 9; 8, 10; 9, identifier:type; 10, None; 11, default_parameter; 11, 12...
def list_csv(self, filter=None, type=None, sort=None, limit=None, page=None):
    """Return the resource listing as raw CSV text.

    Same query refinements as ``list``; the service is asked for CSV
    format and the response body is returned verbatim.
    """
    response = self.service.list(
        self.base, filter, type, sort, limit, page, format='csv')
    return response.text
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 12; 2, function_name:list_logdir; 3, parameters; 3, 4; 3, 5; 3, 6; 3, 9; 4, identifier:self; 5, identifier:id; 6, default_parameter; 6, 7; 6, 8; 7, identifier:filter; 8, None; 9, default_parameter; 9, 10; 9, 11; 10, identifier:sort; 11, None; 12, block; 12, 13; 12...
def list_logdir(self, id, filter=None, sort=None):
    """List the files under the log directory of record *id*.

    Builds the ``<base><id>/logdir/`` endpoint, queries the service
    with the optional filter/sort refinements, and decodes the result
    as a sequence of log-directory file records.
    """
    logdir_url = self.base + str(id) + '/logdir/'
    response = self.service.list(logdir_url, filter, sort)
    return self.service.decode(LogDirFileSchema(), response, many=True)
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:_init_report; 3, parameters; 3, 4; 4, identifier:self; 5, block; 5, 6; 5, 12; 5, 18; 6, expression_statement; 6, 7; 7, assignment; 7, 8; 7, 11; 8, attribute; 8, 9; 8, 10; 9, identifier:self; 10, identifier:sections; 11, list:[]; 12, expression_...
def _init_report(self): self.sections = [] self.section_names = [] try: if os.path.isdir(self.directory) is False: if self.verbose: print("Created directory {}".format(self.directory)) os.mkdir(self.directory) for this i...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 5; 2, function_name:check_instance; 3, parameters; 3, 4; 4, identifier:function; 5, block; 5, 6; 5, 271; 6, function_definition; 6, 7; 6, 8; 6, 14; 7, function_name:wrapper; 8, parameters; 8, 9; 8, 10; 8, 12; 9, identifier:self; 10, list_splat_pattern; 10, 11; 11,...
def check_instance(function): def wrapper(self, *args, **kwargs): func_trans = { "commit": manager.Manager, "compare_config": manager.Manager, "commit_check": manager.Manager, "device_info": manager.Manager, "diff_config...
0, module; 0, 1; 1, function_definition; 1, 2; 1, 3; 1, 23; 2, function_name:commit; 3, parameters; 3, 4; 3, 5; 3, 8; 3, 11; 3, 14; 3, 17; 3, 20; 4, identifier:self; 5, default_parameter; 5, 6; 5, 7; 6, identifier:commands; 7, string:""; 8, default_parameter; 8, 9; 8, 10; 9, identifier:confirmed; 10, None; 11, default_...
def commit(self, commands="", confirmed=None, comment=None, at_time=None, synchronize=False, req_format='text'): if not commands: commands = 'annotate system ""' clean_cmds = [] for cmd in clean_lines(commands): clean_cmds.append(cmd) self.lock() ...