hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0760aecd744d04b7a42ae02e90ca8b423ee0a619
| 2,834
|
py
|
Python
|
ucscsdk/mometa/storage/StorageScsiLunRef.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 9
|
2016-12-22T08:39:25.000Z
|
2019-09-10T15:36:19.000Z
|
ucscsdk/mometa/storage/StorageScsiLunRef.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 10
|
2017-01-31T06:59:56.000Z
|
2021-11-09T09:14:37.000Z
|
ucscsdk/mometa/storage/StorageScsiLunRef.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 13
|
2016-11-14T07:42:58.000Z
|
2022-02-10T17:32:05.000Z
|
"""This module contains the general information for StorageScsiLunRef ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class StorageScsiLunRefConsts():
pass
class StorageScsiLunRef(ManagedObject):
"""This is StorageScsiLunRef class."""
consts = StorageScsiLunRefConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("StorageScsiLunRef", "storageScsiLunRef", "scsi-lun-ref-[id]", VersionMeta.Version131a, "InputOutput", 0x1f, [], ["read-only"], [u'storageLunReplica', u'storageLunSnapshot', u'storageScsiLun', u'storageVirtualDrive'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []),
"ls_dn": MoPropertyMeta("ls_dn", "lsDn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"lun_name": MoPropertyMeta("lun_name", "lunName", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"pn_dn": MoPropertyMeta("pn_dn", "pnDn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"profile_dn": MoPropertyMeta("profile_dn", "profileDn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"id": "id",
"lsDn": "ls_dn",
"lunName": "lun_name",
"pnDn": "pn_dn",
"profileDn": "profile_dn",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.ls_dn = None
self.lun_name = None
self.pn_dn = None
self.profile_dn = None
self.status = None
ManagedObject.__init__(self, "StorageScsiLunRef", parent_mo_or_dn, **kwargs)
| 50.607143
| 251
| 0.642202
| 301
| 2,834
| 5.887043
| 0.315615
| 0.111738
| 0.162528
| 0.165914
| 0.235892
| 0.176072
| 0.119639
| 0.069977
| 0.069977
| 0.069977
| 0
| 0.03223
| 0.178899
| 2,834
| 55
| 252
| 51.527273
| 0.729265
| 0.040226
| 0
| 0
| 0
| 0.04878
| 0.24003
| 0.069055
| 0
| 0
| 0.006278
| 0
| 0
| 1
| 0.02439
| false
| 0.02439
| 0.073171
| 0
| 0.268293
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0765d0b1f7f6046c9a5ec38c71317e234a345a45
| 270
|
py
|
Python
|
pyrocco/__init__.py
|
joaopalmeiro/pyrocco
|
4144f56d654500c3ec49cb04c06b98296004eafe
|
[
"MIT"
] | null | null | null |
pyrocco/__init__.py
|
joaopalmeiro/pyrocco
|
4144f56d654500c3ec49cb04c06b98296004eafe
|
[
"MIT"
] | 4
|
2021-05-31T16:44:16.000Z
|
2021-05-31T17:08:04.000Z
|
pyrocco/__init__.py
|
joaopalmeiro/pyrocco
|
4144f56d654500c3ec49cb04c06b98296004eafe
|
[
"MIT"
] | null | null | null |
__package_name__ = "pyrocco"
__version__ = "0.1.0"
__author__ = "João Palmeiro"
__author_email__ = "[email protected]"
__description__ = "A Python CLI to add the Party Parrot to a custom background image."
__url__ = "https://github.com/joaopalmeiro/pyrocco"
| 38.571429
| 86
| 0.766667
| 38
| 270
| 4.763158
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012552
| 0.114815
| 270
| 6
| 87
| 45
| 0.74477
| 0
| 0
| 0
| 0
| 0
| 0.588889
| 0.107407
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0768b4de117d71513a10b4439456e7226bc8f05f
| 850
|
py
|
Python
|
2020/day08/machine.py
|
ingjrs01/adventofcode
|
c5e4f0158dac0efc2dbfc10167f2700693b41fea
|
[
"Apache-2.0"
] | null | null | null |
2020/day08/machine.py
|
ingjrs01/adventofcode
|
c5e4f0158dac0efc2dbfc10167f2700693b41fea
|
[
"Apache-2.0"
] | null | null | null |
2020/day08/machine.py
|
ingjrs01/adventofcode
|
c5e4f0158dac0efc2dbfc10167f2700693b41fea
|
[
"Apache-2.0"
] | null | null | null |
class Machine():
def __init__(self):
self.pointer = 0
self.accum = 0
self.visited = []
def run(self,program):
salir = False
while (salir == False):
if (self.pointer in self.visited):
return False
if (self.pointer >= len(program)):
return True
self.visited.append(self.pointer)
incremento = 1
if (program[self.pointer][0] == "acc"):
self.accum += program[self.pointer][1]
if (program[self.pointer][0] == "jmp"):
incremento = program[self.pointer][1]
self.pointer += incremento
return True
def getVisited(self):
return self.visited
def getAccum(self):
return self.accum
| 22.368421
| 54
| 0.483529
| 84
| 850
| 4.845238
| 0.309524
| 0.243243
| 0.176904
| 0.088452
| 0.108108
| 0.108108
| 0
| 0
| 0
| 0
| 0
| 0.013917
| 0.408235
| 850
| 37
| 55
| 22.972973
| 0.795229
| 0
| 0
| 0.083333
| 0
| 0
| 0.007059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0.083333
| 0.416667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4ad5badf5fa7e630a25fb87b42b8e063138bfecd
| 495
|
py
|
Python
|
opencv/resizing.py
|
hackerman-101/Hacktoberfest-2022
|
839f28293930987da55f8a2414efaa1cf9676cc9
|
[
"MIT"
] | 1
|
2022-02-22T17:13:54.000Z
|
2022-02-22T17:13:54.000Z
|
opencv/resizing.py
|
hackerman-101/Hacktoberfest-2022
|
839f28293930987da55f8a2414efaa1cf9676cc9
|
[
"MIT"
] | 11
|
2022-01-24T20:42:11.000Z
|
2022-02-27T23:58:24.000Z
|
opencv/resizing.py
|
hackerman-101/Hacktoberfest-2022
|
839f28293930987da55f8a2414efaa1cf9676cc9
|
[
"MIT"
] | null | null | null |
import cv2 as cv
import numpy as np
cap = cv.VideoCapture(1)
print(cap.get(cv.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
cap.set(3,3000)
cap.set(4,3000)
print(cap.get(cv.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
while (cap.isOpened()):
ret , frame = cap.read()
if (ret == True):
cv.imshow("camVid", frame)
if cv.waitKey(25) & 0xFF == ord('q'):
break
else:
break
cap.release()
cv.destroyAllWindows()
| 18.333333
| 45
| 0.656566
| 80
| 495
| 3.9125
| 0.45
| 0.102236
| 0.140575
| 0.166134
| 0.389776
| 0.389776
| 0.389776
| 0.389776
| 0.389776
| 0.389776
| 0
| 0.037406
| 0.189899
| 495
| 26
| 46
| 19.038462
| 0.743142
| 0
| 0
| 0.315789
| 0
| 0
| 0.014141
| 0
| 0
| 0
| 0.008081
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.105263
| 0.210526
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4ad89a5bebd4952730caed6adc03938d82e1dcd1
| 4,251
|
py
|
Python
|
src/review_scraper.py
|
ryankirkland/voice-of-the-customer
|
0214af45cc6aa76bfce64065f07c3f4781ee045e
|
[
"MIT"
] | null | null | null |
src/review_scraper.py
|
ryankirkland/voice-of-the-customer
|
0214af45cc6aa76bfce64065f07c3f4781ee045e
|
[
"MIT"
] | null | null | null |
src/review_scraper.py
|
ryankirkland/voice-of-the-customer
|
0214af45cc6aa76bfce64065f07c3f4781ee045e
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import pandas as pd
import requests
import time
import sys
def reviews_scraper(asin_list, filename):
'''
Takes a list of asins, retrieves html for reviews page, and parses out key data points
Parameters
----------
List of ASINs (list of strings)
Returns:
-------
review information (list), reviews_df (Pandas DataFrame)
'''
asin_list = [asin_list]
print(asin_list)
reviews = []
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0", "Accept-Encoding":"gzip, deflate", "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "DNT":"1","Connection":"close", "Upgrade-Insecure-Requests":"1"}
for asin in asin_list:
print(f'Collecting reviews for {asin}')
passed_last_page = None
counter = 1
while (passed_last_page == None) and (counter <= 10):
print(len(reviews))
reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}'
print(reviews_url)
rev = requests.get(reviews_url, headers=headers)
print(rev.status_code)
reviews_page_content = rev.content
review_soup = BeautifulSoup(reviews_page_content, features='lxml')
print(review_soup)
passed_last_page = review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'})
if passed_last_page == None:
for d in review_soup.findAll('div', attrs={'data-hook':'review'}):
# print(d)
try:
date = d.find('span', attrs={'data-hook':'review-date'})
date = date.text.split(' ')[-3:]
date = ' '.join(date)
except:
date = 'null'
try:
title = d.find('a', attrs={'data-hook': 'review-title'})
except:
title = 'null'
try:
product = d.find('a', attrs={'data-hook': 'format-strip'})
product = product.text
except:
product = 'null'
try:
review_asin = product['href'].split('/')[3]
except:
review_asin = asin
try:
verified = d.find('span', attrs={'data-hook':'avp-badge'})
if verified == None:
verified = 'Not Verified'
else:
verified = verified.text
except:
verified = 'null'
try:
description = d.find('span', attrs={'data-hook': 'review-body'})
except:
description = 'null'
try:
reviewer_name = d.find('span', attrs={'class': 'a-profile-name'})
except:
reviewer_name = 'null'
try:
stars = d.find('span', attrs={'class': 'a-icon-alt'})
except:
stars = 'null'
reviews.append([review_asin, product, date, verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])])
else:
pass
counter += 1
time.sleep(15)
reviews_df = pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title', 'desc', 'reviewer_name', 'rating'])
reviews_df.to_csv(f'data/reviews/{filename}')
print(f'{len(reviews)} reviews for {len(asin_list)} asins stored successfully in {filename}')
return reviews, reviews_df
if __name__ == '__main__':
reviews_scraper(*sys.argv[1:])
| 42.089109
| 285
| 0.482945
| 428
| 4,251
| 4.670561
| 0.376168
| 0.017509
| 0.03902
| 0.035018
| 0.106053
| 0.078039
| 0.028014
| 0
| 0
| 0
| 0
| 0.016304
| 0.394025
| 4,251
| 101
| 286
| 42.089109
| 0.759705
| 0.054575
| 0
| 0.236842
| 0
| 0.039474
| 0.21608
| 0.027889
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013158
| false
| 0.065789
| 0.065789
| 0
| 0.092105
| 0.092105
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
4adace3be34277664a2e8a315913402feb463667
| 3,788
|
py
|
Python
|
lumberdata/metadata.py
|
cglumberjack/lumber_metadata
|
aebca5dbecb8d7684b1b169bf2961e4ab0daca2b
|
[
"MIT"
] | null | null | null |
lumberdata/metadata.py
|
cglumberjack/lumber_metadata
|
aebca5dbecb8d7684b1b169bf2961e4ab0daca2b
|
[
"MIT"
] | null | null | null |
lumberdata/metadata.py
|
cglumberjack/lumber_metadata
|
aebca5dbecb8d7684b1b169bf2961e4ab0daca2b
|
[
"MIT"
] | null | null | null |
# noinspection PyUnresolvedReferences
import os
import re
# TODO I'm going to need to make a dictionary for my big list of stuff i care about and what's needed for
# every file type....
RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate',
'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake']
MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate',
'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake',
'QuickTime:VideoFrameRate', 'QuickTime:Duration']
R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO', 'Date',
'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber',
'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName',
'Take']
def check_exiftool():
"""
checks if exiftool is installed.
:return:
"""
pass
def check_redline():
"""
checks if redline is installed
:return:
"""
pass
def check_ffprobe():
"""
checks if ffprobe is installed
:return:
"""
pass
def get(filein, tool='exiftool', print_output=False):
"""
Due to issues with the exiftool module this is provided as a way to parse output directly
from exiftool through the system commands and cglexecute. For the moment it's only designed
to get the lumberdata for a single file.
:param filein:
:return: dictionary containing lumberdata from exiftool
"""
ext = os.path.splitext(filein)[-1]
d = {}
if tool == 'exiftool':
command = r'exiftool %s' % filein
output = cgl_execute(command=command, verbose=False, print_output=print_output)
for each in output['printout']:
key, value = re.split("\s+:\s+", each)
d[key] = value
return d
elif tool == 'ffprobe':
command = r'%s %s' % ('ffprobe', filein)
output = cgl_execute(command=command)
for each in output['printout']:
try:
values = re.split(":\s+", each)
key = values[0]
values.pop(0)
if 'Stream' in key:
split_v = values[1].split(',')
d['Image Size'] = split_v[2].split()[0]
d['Source Image Width'], d['Source Image Height'] = d['Image Size'].split('x')
d['Video Frame Rate'] = split_v[4].split(' fps')[0].replace(' ', '')
if 'Duration' in key:
d['Track Duration'] = '%s s' % values[0].split(',')[0]
value = ' '.join(values)
d[key] = value
except ValueError:
print('skipping %s' % each)
return d
def get_red_data(filein):
"""
method for pulling lumberdata from r3d files. REDLINE is a command line interface from RED that is required
for this
https://www.red.com/downloads/options?itemInternalId=16144
:param filein:
:return:
"""
file_, ext_ = os.path.splitext(filein)
if ext_.upper() == '.R3D':
command = r'REDLINE --i %s --printMeta 1' % filein
d = {}
for line in os.popen(command).readlines():
line = line.strip('\n')
line = line.replace('\t', '')
line = line.replace(' ', '')
try:
key_, value = line.split(':', 1)
if key_ != 'None':
d[key_] = value
except ValueError:
pass
return d
| 35.735849
| 120
| 0.573654
| 429
| 3,788
| 5.020979
| 0.414918
| 0.01857
| 0.023677
| 0.029248
| 0.290622
| 0.213556
| 0.153203
| 0.153203
| 0.153203
| 0.153203
| 0
| 0.007361
| 0.282735
| 3,788
| 105
| 121
| 36.07619
| 0.785425
| 0.205385
| 0
| 0.269841
| 0
| 0
| 0.299619
| 0.042258
| 0
| 0
| 0
| 0.009524
| 0
| 1
| 0.079365
| false
| 0.063492
| 0.031746
| 0
| 0.15873
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
4adecc45d925a985d290d61ac2e4d5096ee82755
| 3,057
|
py
|
Python
|
jug/subcommands/demo.py
|
rdenham/jug
|
40925445a5f96f9eec237de37e46e6fabcce6526
|
[
"MIT"
] | 309
|
2015-02-09T09:33:52.000Z
|
2022-03-26T22:30:18.000Z
|
jug/subcommands/demo.py
|
zhaoxiugao/jug
|
9c5e3930777658699bc9579c872a010a7c3bffe3
|
[
"MIT"
] | 61
|
2015-01-25T18:11:14.000Z
|
2020-10-15T06:52:13.000Z
|
jug/subcommands/demo.py
|
zhaoxiugao/jug
|
9c5e3930777658699bc9579c872a010a7c3bffe3
|
[
"MIT"
] | 51
|
2015-01-25T17:40:31.000Z
|
2022-02-28T20:42:42.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, Luis Pedro Coelho <[email protected]>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from . import SubCommand
__all__ = ['DemoCommand']
class DemoCommand(SubCommand):
'''Create demo directory.
'''
name = "demo"
def run(self, *args, **kwargs):
import os
from os import path
print('''
Jug will create a directory called 'jug-demo/' with a file called 'primes.py'
inside.
You can test jug by switching to that directory and running the commands:
jug status primes.py
followed by
jug execute primes.py
Upon termination of the process, results will be in a file called 'output.txt'.
PARALLEL USAGE
You can speed up the process by running several 'jug execute' in parallel:
jug execute primes.py &
jug execute primes.py &
jug execute primes.py &
jug execute primes.py &
TROUBLE SHOOTING:
Should you run into issues, you can run the internal tests for jug with
jug test-jug
FURTHER READING
The online documentation contains further reading. You can read the next
tutorial here:
http://jug.readthedocs.io/en/latest/decrypt-example.html
''')
if path.exists('jug-demo'):
print("Jug-demo previously created")
return
os.mkdir('jug-demo')
output = open('jug-demo/primes.py', 'wt')
output.write(r'''
from time import sleep
from jug import TaskGenerator
@TaskGenerator
def is_prime(n):
sleep(1.)
for j in range(2, n - 1):
if (n % j) == 0:
return False
return True
@TaskGenerator
def count_primes(ps):
return sum(ps)
@TaskGenerator
def write_output(n):
output = open('output.txt', 'wt')
output.write("Found {0} primes <= 100.\n".format(n))
output.close()
primes100 = []
for n in range(2, 101):
primes100.append(is_prime(n))
n_primes = count_primes(primes100)
write_output(n_primes)
''')
output.close()
demo = DemoCommand()
| 26.815789
| 80
| 0.700687
| 448
| 3,057
| 4.754464
| 0.473214
| 0.041315
| 0.037559
| 0.042254
| 0.033803
| 0.033803
| 0.033803
| 0.033803
| 0.033803
| 0.033803
| 0
| 0.012018
| 0.210664
| 3,057
| 113
| 81
| 27.053097
| 0.8707
| 0.394832
| 0
| 0.183333
| 0
| 0
| 0.771256
| 0.040592
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.083333
| 0
| 0.2
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4af181be525d8e8daf1ffbab71cb2d90c60d3216
| 597
|
py
|
Python
|
EP_2019/py_impl/main.py
|
Alisa-lisa/conferences
|
d93014747dc9d18493295dbc33fa51c8fb9467dc
|
[
"MIT"
] | 5
|
2019-07-06T07:22:57.000Z
|
2020-12-19T22:49:35.000Z
|
EP_2019/py_impl/main.py
|
pindash/conferences
|
87fcb9f595a244408c015c66283c337d124b358d
|
[
"MIT"
] | null | null | null |
EP_2019/py_impl/main.py
|
pindash/conferences
|
87fcb9f595a244408c015c66283c337d124b358d
|
[
"MIT"
] | 3
|
2020-06-07T14:58:24.000Z
|
2020-11-24T22:51:14.000Z
|
from simulation.car import spawn_drivers
from simulation.passenger import spawn_passengers
from simulation.core import World, Clock
conf = {
"x": 100,
"y": 100,
"drivers": 200,
"users": 1000,
"start": "2019-07-08T00:00:00",
"end": "2019-07-08T00:01:00"
}
clock = Clock(conf["start"], conf["end"])
if __name__ == '__main__':
world = World([conf['x'], conf['y']], clock=clock)
world.register_drivers(spawn_drivers(conf["drivers"], conf['x'], conf['y']))
world.register_passengers(spawn_passengers(conf["users"], conf['x'], conf['y']))
world.run(log=False)
| 28.428571
| 84
| 0.649916
| 82
| 597
| 4.560976
| 0.390244
| 0.053476
| 0.072193
| 0.080214
| 0.080214
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081188
| 0.154104
| 597
| 20
| 85
| 29.85
| 0.659406
| 0
| 0
| 0
| 0
| 0
| 0.157454
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.117647
| 0.176471
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
4af19e16fcec726156bfcc2b3d41a671e651e34c
| 795
|
py
|
Python
|
Python/reverse_with_swap.py
|
avulaankith/Python
|
71269b1a36b45150edb7834c559386a91618e723
|
[
"MIT"
] | null | null | null |
Python/reverse_with_swap.py
|
avulaankith/Python
|
71269b1a36b45150edb7834c559386a91618e723
|
[
"MIT"
] | null | null | null |
Python/reverse_with_swap.py
|
avulaankith/Python
|
71269b1a36b45150edb7834c559386a91618e723
|
[
"MIT"
] | 1
|
2021-08-14T13:24:11.000Z
|
2021-08-14T13:24:11.000Z
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'reverse_words_order_and_swap_cases' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING sentence as parameter.
#
def reverse_words_order_and_swap_cases(sentence):
# Write your code here
l = []
st = ""
for i in sentence:
if i == " ":
l.append(st)
st = ""
else:
st += i.swapcase()
# continue
l.append(st)
st = ""
l.reverse()
news = ""
for i in range(len(l)):
if i != (len(l) - 1):
news += l[i] + " "
else:
news += l[i]
return news
sentence = input()
news = reverse_words_order_and_swap_cases(sentence)
print(news)
| 18.488372
| 67
| 0.566038
| 105
| 795
| 4.142857
| 0.466667
| 0.082759
| 0.117241
| 0.137931
| 0.236782
| 0.236782
| 0.170115
| 0
| 0
| 0
| 0
| 0.003711
| 0.322013
| 795
| 42
| 68
| 18.928571
| 0.80334
| 0.257862
| 0
| 0.259259
| 0
| 0
| 0.003442
| 0
| 0
| 0
| 0
| 0.02381
| 0
| 1
| 0.037037
| false
| 0
| 0.185185
| 0
| 0.259259
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4af2b457e2a07435b2f1cbc51394d14794b7cb2f
| 294
|
py
|
Python
|
creeds/static/api1.py
|
MaayanLab/creeds
|
7d580c91ca45c03e34bbc0d1928668f266ff13d9
|
[
"CC0-1.0"
] | 2
|
2019-01-10T18:10:45.000Z
|
2019-04-05T13:47:01.000Z
|
creeds/static/api1.py
|
MaayanLab/creeds
|
7d580c91ca45c03e34bbc0d1928668f266ff13d9
|
[
"CC0-1.0"
] | 1
|
2019-05-09T21:25:31.000Z
|
2019-05-13T14:26:30.000Z
|
creeds/static/api1.py
|
MaayanLab/creeds
|
7d580c91ca45c03e34bbc0d1928668f266ff13d9
|
[
"CC0-1.0"
] | 2
|
2018-12-21T23:59:27.000Z
|
2019-10-24T18:26:26.000Z
|
import json, requests
from pprint import pprint
CREEDS_URL = 'http://amp.pharm.mssm.edu/CREEDS/'
response = requests.get(CREEDS_URL + 'search', params={'q':'STAT3'})
if response.status_code == 200:
pprint(response.json())
json.dump(response.json(), open('api1_result.json', 'wb'), indent=4)
| 32.666667
| 69
| 0.721088
| 43
| 294
| 4.837209
| 0.674419
| 0.086538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022556
| 0.095238
| 294
| 8
| 70
| 36.75
| 0.759399
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
4af59f537fb6e3fa8f98dad4df206983a8ca37fd
| 3,651
|
py
|
Python
|
gengine/app/tests_old/test_groups.py
|
greck2908/gamification-engine
|
4a74086bde4505217e4b9ba36349a427a7042b4b
|
[
"MIT"
] | 347
|
2015-03-03T14:25:59.000Z
|
2022-03-09T07:46:31.000Z
|
gengine/app/tests_old/test_groups.py
|
greck2908/gamification-engine
|
4a74086bde4505217e4b9ba36349a427a7042b4b
|
[
"MIT"
] | 76
|
2015-03-05T23:37:31.000Z
|
2022-03-31T13:41:42.000Z
|
gengine/app/tests_old/test_groups.py
|
greck2908/gamification-engine
|
4a74086bde4505217e4b9ba36349a427a7042b4b
|
[
"MIT"
] | 115
|
2015-03-04T23:47:25.000Z
|
2021-12-24T06:24:06.000Z
|
# -*- coding: utf-8 -*-
from gengine.app.tests.base import BaseDBTest
from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language
from gengine.metadata import DBSession
from gengine.app.model import AuthUser
class TestUserCreation(BaseDBTest):
def test_user_creation(self):
lang = get_or_create_language("en")
user = create_user(
lat = 12.1,
lng = 12.2,
#country = "RO",
#region = "Transylvania",
#city = "Cluj-Napoca",
timezone = "Europe/Bukarest",
language = "en",
additional_public_data = {
"first_name" : "Rudolf",
"last_name" : "Red Nose"
}
)
self.assertTrue(user.lat == 12.1)
self.assertTrue(user.lng == 12.2)
#self.assertTrue(user.country == "RO")
#self.assertTrue(user.region == "Transylvania")
#self.assertTrue(user.city == "Cluj-Napoca")
self.assertTrue(user.timezone == "Europe/Bukarest")
self.assertTrue(user.language_id == lang.id)
self.assertTrue(user.additional_public_data["first_name"] == "Rudolf")
self.assertTrue(user.additional_public_data["last_name"] == "Red Nose")
def test_user_updation(self):
lang = get_or_create_language("en")
user = create_user()
user = update_user(
user_id = user.id,
lat = 14.2,
lng = 16.3,
#country = "EN",
#region = "Transylvania",
#city = "Cluj-Napoca",
timezone = "Europe/Bukarest",
language = "en",
additional_public_data = {
"first_name" : "Rudolf",
"last_name" : "Red Nose"
}
)
# Correct cases
self.assertTrue(user.lat == 14.2)
self.assertTrue(user.lng == 16.3)
#self.assertTrue(user.country == "EN")
#self.assertTrue(user.region == "Transylvania")
#self.assertTrue(user.city == "Cluj-Napoca")
self.assertTrue(user.timezone == "Europe/Bukarest")
self.assertTrue(user.language_id == lang.id)
def test_user_deletion(self):
user1 = create_user()
# Create Second user
user2 = create_user(
lat=85.59,
lng=65.75,
#country="DE",
#region="Niedersachsen",
#city="Osnabrück",
timezone="Europe/Berlin",
language="de",
additional_public_data={
"first_name": "Michael",
"last_name": "Clarke"
},
friends=[1]
)
remaining_users = delete_user(
user_id = user1.id
)
# Correct cases
self.assertNotIn(user1.id, remaining_users)
self.assertEqual(user2.id, remaining_users[0].id)
def test_verify_password(self):
auth_user = AuthUser()
auth_user.password = "test12345"
auth_user.active = True
auth_user.email = "[email protected]"
DBSession.add(auth_user)
iscorrect = auth_user.verify_password("test12345")
self.assertEqual(iscorrect, True)
def test_create_token(self):
user = create_user()
auth_user = AuthUser()
auth_user.user_id = user.id
auth_user.password = "test12345"
auth_user.active = True
auth_user.email = "[email protected]"
DBSession.add(auth_user)
if auth_user.verify_password("test12345"):
token = auth_user.get_or_create_token()
self.assertNotEqual(token, None)
| 29.92623
| 99
| 0.564229
| 388
| 3,651
| 5.118557
| 0.25
| 0.11279
| 0.145015
| 0.050352
| 0.517623
| 0.437563
| 0.391742
| 0.391742
| 0.391742
| 0.391742
| 0
| 0.024077
| 0.317447
| 3,651
| 121
| 100
| 30.173554
| 0.772873
| 0.135032
| 0
| 0.371795
| 0
| 0
| 0.09001
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 1
| 0.064103
| false
| 0.064103
| 0.051282
| 0
| 0.128205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
ab0004198f8e66f5be455567544099aa471f9197
| 3,349
|
py
|
Python
|
modules/helper/subtitles/subtitles.py
|
sdelcore/video-event-notifier-old
|
16bd322f2b81efbb3e08e63ed407ab098d610c88
|
[
"MIT"
] | null | null | null |
modules/helper/subtitles/subtitles.py
|
sdelcore/video-event-notifier-old
|
16bd322f2b81efbb3e08e63ed407ab098d610c88
|
[
"MIT"
] | null | null | null |
modules/helper/subtitles/subtitles.py
|
sdelcore/video-event-notifier-old
|
16bd322f2b81efbb3e08e63ed407ab098d610c88
|
[
"MIT"
] | null | null | null |
import time
import srt
import re
import datetime
from mqtthandler import MQTTHandler
INIT_STATUS={
"video": {
"title": None,
"series_title": None,
"season": None,
"episode": None
},
"time": None,
"events": None
}
class SubtitleHandler:
subtitles = []
phrases = []
def __init__(self, broker):
self.mqtt = MQTTHandler(broker)
def parseSRT(self, srt_filename):
f=open(srt_filename, "r")
subtitle_generate = srt.parse(f.read())
f.close()
self.subtitles = list(subtitle_generate)
return self.subtitles
def parsePhrases(self, phrase_filename):
f=open(phrase_filename, "r")
lines = f.readlines()
for line in lines:
phrase = line.rstrip("\n\r").split("/")
self.phrases.append(phrase)
return self.phrases
def isPhraseInLine(self,phrase, sub, content):
sub_line = re.sub('[^A-Za-z0-9\s]+', '', str(content)).lower()
phrase = re.sub('[^A-Za-z0-9\s]+', '', str(phrase)).lower()
count = 0
while bool(re.search(phrase, sub_line)):
count += 1
sub_line = sub_line.replace(phrase, '', 1)
return count
def getEventTime(self,sub):
middle = sub.end - sub.start
between_sec = datetime.timedelta.total_seconds(middle) / 2
sec = between_sec + datetime.timedelta.total_seconds(sub.start)
return int(sec)
def matchEventToMovie(self, movie, subtitles, phrases, time_offset):
global INIT_STATUS
status = INIT_STATUS
status["video"]["title"] = movie
#TODO determine how to set up phrase data
for sub in subtitles:
c = sub.content.replace('\n', ' ')
c = c.split(" ")
firstpart, secondpart = " ".join(c[:len(c)//2]), " ".join(c[len(c)//2:])
mult = 0
for phrase in phrases:
line = phrase[0]
events = phrase[1]
mult += self.isPhraseInLine(line,sub,sub.content)
#f = self.isPhraseInLine(line,sub, firstpart)
#s = self.isPhraseInLine(line,sub, secondpart)
#if f + s == 0:
# mult += self.isPhraseInLine(line,sub,sub.content )
#else:
# mult += f+s
## DEAR LESS DRUNK SELF
# this currently adds the number of events over the entire subtitle
# what you need to do if you wish to accept it, is to split each subtitle into to two parts
# the first part will the the half that has the first bit of text, which will have the correct time to event for the work
# the second half will have the correct time to event gfor the second half
# you could have three if statements that check and each toher them reach a send.message()
if mult > 0: # wotn work properly if events is greater than 1
status["time"] = self.getEventTime(sub) + time_offset
status["events"] = int(events) * mult
self.sendMessage(status)
#print(sub.content)
def sendMessage(self, msg):
self.mqtt.send(msg)
print(msg)
return msg
def isDone(self):
return True
| 33.828283
| 133
| 0.561959
| 407
| 3,349
| 4.565111
| 0.371007
| 0.026911
| 0.047363
| 0.053821
| 0.142088
| 0.131324
| 0.089343
| 0.016146
| 0
| 0
| 0
| 0.007127
| 0.329651
| 3,349
| 99
| 134
| 33.828283
| 0.82049
| 0.221857
| 0
| 0
| 0
| 0
| 0.041683
| 0
| 0
| 0
| 0
| 0.010101
| 0
| 1
| 0.115942
| false
| 0
| 0.072464
| 0.014493
| 0.318841
| 0.014493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab143c1e766e4bf7477a807945495619e156d263
| 729
|
py
|
Python
|
Examples/IMAP/FilteringMessagesFromIMAPMailbox.py
|
Muzammil-khan/Aspose.Email-Python-Dotnet
|
04ca3a6f440339f3ddf316218f92d15d66f24e7e
|
[
"MIT"
] | 5
|
2019-01-28T05:17:12.000Z
|
2020-04-14T14:31:34.000Z
|
Examples/IMAP/FilteringMessagesFromIMAPMailbox.py
|
Muzammil-khan/Aspose.Email-Python-Dotnet
|
04ca3a6f440339f3ddf316218f92d15d66f24e7e
|
[
"MIT"
] | 1
|
2019-01-28T16:07:26.000Z
|
2021-11-25T10:59:52.000Z
|
Examples/IMAP/FilteringMessagesFromIMAPMailbox.py
|
Muzammil-khan/Aspose.Email-Python-Dotnet
|
04ca3a6f440339f3ddf316218f92d15d66f24e7e
|
[
"MIT"
] | 6
|
2018-07-16T14:57:34.000Z
|
2020-08-30T05:59:52.000Z
|
import aspose.email
from aspose.email.clients.imap import ImapClient
from aspose.email.clients import SecurityOptions
from aspose.email.clients.imap import ImapQueryBuilder
import datetime as dt
def run():
dataDir = ""
#ExStart: FetchEmailMessageFromServer
client = ImapClient("imap.gmail.com", 993, "username", "password")
client.select_folder("Inbox")
builder = ImapQueryBuilder()
builder.subject.contains("Newsletter")
builder.internal_date.on(dt.datetime.now())
query = builder.get_query()
msgsColl = client.list_messages(query)
print("Total Messages fulfilling search criterion: " + str(len(msgsColl)))
#ExEnd: FetchEmailMessageFromServer
if __name__ == '__main__':
run()
| 31.695652
| 78
| 0.739369
| 81
| 729
| 6.506173
| 0.617284
| 0.083491
| 0.085389
| 0.125237
| 0.121442
| 0.121442
| 0
| 0
| 0
| 0
| 0
| 0.004847
| 0.150892
| 729
| 22
| 79
| 33.136364
| 0.846527
| 0.096022
| 0
| 0
| 0
| 0
| 0.147641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0.058824
| 0.294118
| 0
| 0.352941
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
ab149d0949672fc58bdb20c8bbee5cb7134e800f
| 2,363
|
py
|
Python
|
Python.FancyBear/settings.py
|
010001111/Vx-Suites
|
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
|
[
"MIT"
] | 2
|
2021-02-04T06:47:45.000Z
|
2021-07-28T10:02:10.000Z
|
Python.FancyBear/settings.py
|
010001111/Vx-Suites
|
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
|
[
"MIT"
] | null | null | null |
Python.FancyBear/settings.py
|
010001111/Vx-Suites
|
6b4b90a60512cce48aa7b87aec5e5ac1c4bb9a79
|
[
"MIT"
] | null | null | null |
# Server UID
SERVER_UID = 45158729
# Setup Logging system #########################################
#
import os
from FileConsoleLogger import FileConsoleLogger
ServerLogger = FileConsoleLogger( os.path.join(os.path.dirname(os.path.abspath(__file__)), "_w3server.log") )
W3Logger = FileConsoleLogger( os.path.join(os.path.dirname(os.path.abspath(__file__)), "_w3.log") )
#
# Setup Level 2 Protocol - P2Scheme #########################################
#
from P2Scheme import P2Scheme
P2_URL_TOKEN = '760e25f9eb3124'.decode('hex')
P2_SUBJECT_TOKEN = '\x55\xaa\x63\x68\x69\x6e\x61'
P2_DATA_TOKEN = '\x55\xaa\x63\x68\x69\x6e\x61'
# P2_DATA_TOKEN = 'd85a8c54fbe5e6'.decode('hex')
MARK = 'itwm='
B64_JUNK_LEN = 9
BIN_JUNK_LEN = 4
P2_Scheme = P2Scheme(_url_token=P2_URL_TOKEN, _data_token=P2_DATA_TOKEN, _mark=MARK, _subj_token=P2_SUBJECT_TOKEN,\
_b64junk_len=B64_JUNK_LEN, _binary_junk_len=BIN_JUNK_LEN)
#
# Setup Level 3 Protocol - P3Scheme #########################################
#
from P3Scheme import P3Scheme
#
P3_PRIVATE_TOKEN = 'a20e25f9aa3fe4'.decode('hex')
P3_SERVICE_TOKEN = '015a1354acf1b1'.decode('hex')
#
P3_Scheme = P3Scheme(private_token=P3_PRIVATE_TOKEN, service_token=P3_SERVICE_TOKEN)
#
# Setup HTTP checker
#
#from HTTPHeadersChecker import HTTPHeadersChecker
#
#HTTPChecker = HTTPHeadersChecker()
# Setup LocalStorage
#
from FSLocalStorage import FSLocalStorage
LocalStorage = FSLocalStorage()
############################################################
# Initialize Server instance #
#
#from W3Server import W3Server
#MAIN_HANDLER = W3Server(p2_scheme=P2_Scheme, p3_scheme=P3_Scheme, http_checker=HTTPChecker, local_storage=LocalStorage, logger=ServerLogger)
############################################################
# Mail Parameters
POP3_MAIL_IP = 'pop.gmail.com'
POP3_PORT = 995
POP3_ADDR = '[email protected]'
POP3_PASS = '30Jass11'
SMTP_MAIL_IP = 'smtp.gmail.com'
SMTP_PORT = 587
SMTP_TO_ADDR = '[email protected]'
SMTP_FROM_ADDR = '[email protected]'
SMTP_PASS = '75Gina75'
# C&C Parametrs
#
XAS_IP = '104.152.187.66'
XAS_GATE = '/updates/'
############################################################
# Setup P3 communication
# wsgi2
#
LS_TIMEOUT = 1 # big loop timeout
FILES_PER_ITER = 5 # count of requests per iter
############################################################
| 28.46988
| 141
| 0.650444
| 275
| 2,363
| 5.290909
| 0.418182
| 0.024742
| 0.02268
| 0.037113
| 0.125773
| 0.125773
| 0.125773
| 0.125773
| 0.125773
| 0.125773
| 0
| 0.067989
| 0.103682
| 2,363
| 82
| 142
| 28.817073
| 0.61898
| 0.241219
| 0
| 0
| 0
| 0
| 0.19341
| 0.077364
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.060606
| 0.151515
| 0
| 0.151515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
ab1a86e3a749c305907e0a449b620a088db1db5e
| 4,070
|
py
|
Python
|
var/spack/repos/builtin/packages/py-mdanalysis/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/py-mdanalysis/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/py-mdanalysis/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMdanalysis(PythonPackage):
"""MDAnalysis is a Python toolkit to analyze molecular dynamics
trajectories generated by a wide range of popular simulation
packages including DL_Poly, CHARMM, Amber, NAMD, LAMMPS, and
Gromacs. (See the lists of supported trajectory formats and
topology formats.)"""
homepage = "https://www.mdanalysis.org"
pypi = "MDAnalysis/MDAnalysis-0.19.2.tar.gz"
version('1.0.0', sha256='f45a024aca45e390ff1c45ca90beb2180b78881be377e2a1aa9cd6c109bcfa81')
version('0.20.1', sha256='d04b71b193b9716d2597ffb9938b93f43487fa535da1bb5c1f2baccf356d7df9')
version('0.19.2', sha256='c5395bbafa5efca2e1aee4715d26129844140c47cb8301da0293106cb969de7d')
version('0.19.1', sha256='ff1d694f8598c0833ec340de6a6adb3b5e62b92d0fa94ee6401718ba972db3cc')
version('0.19.0', sha256='248e3b37fc6150e31c609cc18a3927c32aee37b76d29cbfedf635e7e1aa982cf')
version('0.18.0', sha256='a08acea1755112411e7db55e3f282e164b47a59e15794b38744cce6c596f252a')
version('0.17.0', sha256='9bd61760334698cc7b8a57ad26456451e926e9c9e66722594ad8816561348cde')
version('0.16.2', sha256='407d9a9ff1ab8a5e47973714d06fabff220f8d08a28792dee93e88e70e995b0a')
version('0.16.1', sha256='3dc8f5d639ab3a0d152cbd7259ae9372ec8a9bac0f8cb7d3b80ce5adc1e3ee57')
version('0.16.0', sha256='c4824fa1fddd336daa39371436187ebb023366885fb250c2827ed7fce2546bd4')
version('0.15.0', sha256='9088786048b47339cba1f8a586977bbb3bb04ae1bcd0462b59e45bda37e25533')
variant('analysis', default=True,
description='Enable analysis packages: matplotlib, scipy, seaborn')
variant('amber', default=False,
description='Support AMBER netcdf format.')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.17.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.0:', type=('build', 'run'))
depends_on('py-mock', when='@0.18.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@1.0.0:', type=('build', 'run'))
depends_on('py-joblib', when='@0.16.0:0.20.1', type=('build', 'run'))
depends_on('[email protected]:', when='@1.0.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@:0.15.0', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.0:0.19.2', type=('build', 'run'))
depends_on('[email protected]:', when='@0.20.1:', type=('build', 'run'))
depends_on('[email protected]:', when='@:0.17.0', type=('build', 'run'))
depends_on('[email protected]:', when='@0.18.0:', type=('build', 'run'))
depends_on('[email protected]:', when='@:0.16.2', type=('build', 'run'))
depends_on('[email protected]:', when='@0.17.0:', type=('build', 'run'))
depends_on('py-matplotlib', when='@:0.15.0+analysis', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.0:0.16.1+analysis', type=('build', 'run'))
depends_on('[email protected]:', when='@0.16.2:', type=('build', 'run'))
depends_on('py-scipy', when='@:0.16.1+analysis', type=('build', 'run'))
depends_on('py-scipy', when='@0.16.2:0.17.0', type=('build', 'run'))
depends_on('[email protected]:', when='@0.18.0:', type=('build', 'run'))
depends_on('py-scikit-learn', when='@0.16.0:+analysis', type=('build', 'run'))
depends_on('py-seaborn', when='+analysis', type=('build', 'run'))
depends_on('[email protected]:', when='+amber', type=('build', 'run'))
depends_on('hdf5', when='+amber', type=('run'))
| 54.266667
| 96
| 0.653317
| 516
| 4,070
| 5.096899
| 0.265504
| 0.095817
| 0.108745
| 0.180608
| 0.356654
| 0.322433
| 0.313688
| 0.263878
| 0.220532
| 0.176046
| 0
| 0.193446
| 0.145209
| 4,070
| 74
| 97
| 55
| 0.562518
| 0.110811
| 0
| 0
| 0
| 0
| 0.495826
| 0.231219
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab1d930ad268269a2d4b9569657fc14b57b495e4
| 690
|
py
|
Python
|
lib/jbgp/jbgpneighbor.py
|
routedo/junos-pyez-example
|
b89df2d40ca0a233529e4a26b42dd605c00aae46
|
[
"Apache-2.0"
] | null | null | null |
lib/jbgp/jbgpneighbor.py
|
routedo/junos-pyez-example
|
b89df2d40ca0a233529e4a26b42dd605c00aae46
|
[
"Apache-2.0"
] | null | null | null |
lib/jbgp/jbgpneighbor.py
|
routedo/junos-pyez-example
|
b89df2d40ca0a233529e4a26b42dd605c00aae46
|
[
"Apache-2.0"
] | 1
|
2020-06-17T12:17:18.000Z
|
2020-06-17T12:17:18.000Z
|
"""
Query BGP neighbor table on a Juniper network device.
"""
import sys
from jnpr.junos import Device
from jnpr.junos.factory import loadyaml
def juniper_bgp_state(dev, bgp_neighbor):
"""
This function queries the BGP neighbor table on a Juniper network device.
dev = Juniper device connection
bgp_neighbor = IP address of BGP neighbor
return = Returns state of BGP neighbor
"""
try:
globals().update(loadyaml('yaml/bgp_neighbor.yml'))
bgp_ni = bgp_neighbor_info(dev).get(neighbor_address=bgp_neighbor)
return bgp_ni
except Exception as err:
print(err)
dev.close()
sys.exit(1)
return
return
| 23
| 77
| 0.676812
| 93
| 690
| 4.903226
| 0.483871
| 0.217105
| 0.070175
| 0.078947
| 0.171053
| 0.171053
| 0.171053
| 0.171053
| 0
| 0
| 0
| 0.001927
| 0.247826
| 690
| 29
| 78
| 23.793103
| 0.876686
| 0.349275
| 0
| 0.142857
| 0
| 0
| 0.050725
| 0.050725
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.428571
| 0.071429
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab1fe51ebbcd4a1dc4363d8ff7260094c438deca
| 2,170
|
py
|
Python
|
lib/cherrypy/cherrypy/test/test_sessionauthenticate.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/cherrypy/cherrypy/test/test_sessionauthenticate.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/cherrypy/cherrypy/test/test_sessionauthenticate.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 162
|
2015-01-01T00:21:16.000Z
|
2022-02-23T02:36:04.000Z
|
import cherrypy
from cherrypy.test import helper
class SessionAuthenticateTest(helper.CPWebCase):
def setup_server():
def check(username, password):
# Dummy check_username_and_password function
if username != 'test' or password != 'password':
return 'Wrong login/password'
def augment_params():
# A simple tool to add some things to request.params
# This is to check to make sure that session_auth can handle request
# params (ticket #780)
cherrypy.request.params["test"] = "test"
cherrypy.tools.augment_params = cherrypy.Tool('before_handler',
augment_params, None, priority=30)
class Test:
_cp_config = {'tools.sessions.on': True,
'tools.session_auth.on': True,
'tools.session_auth.check_username_and_password': check,
'tools.augment_params.on': True,
}
def index(self, **kwargs):
return "Hi %s, you are logged in" % cherrypy.request.login
index.exposed = True
cherrypy.tree.mount(Test())
setup_server = staticmethod(setup_server)
def testSessionAuthenticate(self):
# request a page and check for login form
self.getPage('/')
self.assertInBody('<form method="post" action="do_login">')
# setup credentials
login_body = 'username=test&password=password&from_page=/'
# attempt a login
self.getPage('/do_login', method='POST', body=login_body)
self.assertStatus((302, 303))
# get the page now that we are logged in
self.getPage('/', self.cookies)
self.assertBody('Hi test, you are logged in')
# do a logout
self.getPage('/do_logout', self.cookies, method='POST')
self.assertStatus((302, 303))
# verify we are logged out
self.getPage('/', self.cookies)
self.assertInBody('<form method="post" action="do_login">')
| 34.444444
| 82
| 0.566359
| 233
| 2,170
| 5.16309
| 0.39485
| 0.045719
| 0.027431
| 0.0399
| 0.147963
| 0.071488
| 0.071488
| 0.071488
| 0
| 0
| 0
| 0.011765
| 0.334101
| 2,170
| 62
| 83
| 35
| 0.820761
| 0.152074
| 0
| 0.176471
| 0
| 0
| 0.197044
| 0.072797
| 0
| 0
| 0
| 0
| 0.147059
| 1
| 0.147059
| false
| 0.147059
| 0.058824
| 0.029412
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
ab224c0b7dd96b0783239d1ab27b2b04825a3e94
| 4,122
|
py
|
Python
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/italian/timeperiod_extractor_config.py
|
felaray/Recognizers-Text
|
f514fd61c8d472ed92565261162712409f655312
|
[
"MIT"
] | null | null | null |
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/italian/timeperiod_extractor_config.py
|
felaray/Recognizers-Text
|
f514fd61c8d472ed92565261162712409f655312
|
[
"MIT"
] | 6
|
2021-12-20T17:13:35.000Z
|
2022-03-29T08:54:11.000Z
|
Python/libraries/recognizers-date-time/recognizers_date_time/date_time/italian/timeperiod_extractor_config.py
|
felaray/Recognizers-Text
|
f514fd61c8d472ed92565261162712409f655312
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List, Pattern
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import Extractor
from recognizers_number.number.italian.extractors import ItalianIntegerExtractor
from ...resources.italian_date_time import ItalianDateTime
from ..extractors import DateTimeExtractor
from ..base_timeperiod import TimePeriodExtractorConfiguration, MatchedIndex
from ..base_time import BaseTimeExtractor
from ..base_timezone import BaseTimeZoneExtractor
from .time_extractor_config import ItalianTimeExtractorConfiguration
from .base_configs import ItalianDateTimeUtilityConfiguration
from .timezone_extractor_config import ItalianTimeZoneExtractorConfiguration
class ItalianTimePeriodExtractorConfiguration(TimePeriodExtractorConfiguration):
@property
def check_both_before_after(self) -> bool:
return self._check_both_before_after
@property
def simple_cases_regex(self) -> List[Pattern]:
return self._simple_cases_regex
@property
def till_regex(self) -> Pattern:
return self._till_regex
@property
def time_of_day_regex(self) -> Pattern:
return self._time_of_day_regex
@property
def general_ending_regex(self) -> Pattern:
return self._general_ending_regex
@property
def single_time_extractor(self) -> DateTimeExtractor:
return self._single_time_extractor
@property
def integer_extractor(self) -> Extractor:
return self._integer_extractor
@property
def token_before_date(self) -> str:
return self._token_before_date
@property
def pure_number_regex(self) -> List[Pattern]:
return self._pure_number_regex
@property
def time_zone_extractor(self) -> DateTimeExtractor:
return self._time_zone_extractor
def __init__(self):
super().__init__()
self._check_both_before_after = ItalianDateTime.CheckBothBeforeAfter
self._single_time_extractor = BaseTimeExtractor(
ItalianTimeExtractorConfiguration())
self._integer_extractor = ItalianIntegerExtractor()
self.utility_configuration = ItalianDateTimeUtilityConfiguration()
self._simple_cases_regex: List[Pattern] = [
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PureNumFromTo),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PureNumBetweenAnd),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PmRegex),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.AmRegex)
]
self._till_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.TillRegex)
self._time_of_day_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.TimeOfDayRegex)
self._general_ending_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.GeneralEndingRegex)
self.from_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.FromRegex2)
self.connector_and_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.ConnectorAndRegex)
self.before_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.BeforeRegex2)
self._token_before_date = ItalianDateTime.TokenBeforeDate
self._pure_number_regex = [ItalianDateTime.PureNumFromTo, ItalianDateTime.PureNumFromTo]
self._time_zone_extractor = BaseTimeZoneExtractor(
ItalianTimeZoneExtractorConfiguration())
def get_from_token_index(self, source: str) -> MatchedIndex:
match = self.from_regex.search(source)
if match:
return MatchedIndex(True, match.start())
return MatchedIndex(False, -1)
def get_between_token_index(self, source: str) -> MatchedIndex:
match = self.before_regex.search(source)
if match:
return MatchedIndex(True, match.start())
return MatchedIndex(False, -1)
def is_connector_token(self, source: str):
return self.connector_and_regex.match(source)
| 37.816514
| 96
| 0.743328
| 419
| 4,122
| 6.959427
| 0.24105
| 0.037723
| 0.068587
| 0.078875
| 0.34808
| 0.268176
| 0.205418
| 0.141632
| 0.056927
| 0.056927
| 0
| 0.001201
| 0.191897
| 4,122
| 108
| 97
| 38.166667
| 0.874212
| 0.021834
| 0
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.142857
| 0.130952
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
ab253b2fa27d701106a981880d15472309de60c1
| 2,379
|
py
|
Python
|
tests_oval_graph/test_arf_xml_parser/test_arf_xml_parser.py
|
Honny1/oval-graph
|
96472a9d2b08c2afce620c54f229ce95ad019d1f
|
[
"Apache-2.0"
] | 21
|
2019-08-01T09:09:25.000Z
|
2020-09-27T10:00:09.000Z
|
tests_oval_graph/test_arf_xml_parser/test_arf_xml_parser.py
|
Honny1/oval-graph
|
96472a9d2b08c2afce620c54f229ce95ad019d1f
|
[
"Apache-2.0"
] | 129
|
2019-08-04T19:06:24.000Z
|
2020-10-03T10:02:26.000Z
|
tests_oval_graph/test_arf_xml_parser/test_arf_xml_parser.py
|
Honny1/oval-graph
|
96472a9d2b08c2afce620c54f229ce95ad019d1f
|
[
"Apache-2.0"
] | 11
|
2019-08-07T08:53:54.000Z
|
2020-10-02T22:02:38.000Z
|
from pathlib import Path
import pytest
from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser
def get_arf_report_path(src="global_test_data/ssg-fedora-ds-arf.xml"):
return str(Path(__file__).parent.parent / src)
@pytest.mark.parametrize("rule_id, result", [
(
"xccdf_org.ssgproject.content_rule_accounts_passwords_pam_faillock_deny",
"false",
),
(
"xccdf_org.ssgproject.content_rule_sshd_disable_gssapi_auth",
"false",
),
(
"xccdf_org.ssgproject.content_rule_service_debug-shell_disabled",
"true",
),
(
"xccdf_org.ssgproject.content_rule_mount_option_dev_shm_noexec",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_unsuccessful_file_modification_creat",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_file_deletion_events_rmdir",
"false",
),
(
"xccdf_org.ssgproject.content_rule_require_singleuser_auth",
"true",
),
])
def test_parsing_and_evaluate_scan_rule(rule_id, result):
path = get_arf_report_path()
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == result
def test_parsing_arf_report_without_system_data():
path = get_arf_report_path("global_test_data/arf_no_system_data.xml")
rule_id = "xccdf_com.example.www_rule_test-fail"
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == "false"
@pytest.mark.parametrize("rule_id, pattern", [
("hello", "404 rule \"hello\" not found!"),
("xccdf_org.ssgproject.content_rule_ntpd_specify_remote_server", "notselected"),
("xccdf_org.ssgproject.content_rule_configure_bind_crypto_policy", "notchecked"),
("xccdf_org.ssgproject.content_rule_ensure_gpgcheck_local_packages", "notapplicable"),
])
def test_parsing_bad_rule(rule_id, pattern):
path = get_arf_report_path()
parser = ARFXMLParser(path)
with pytest.raises(Exception, match=pattern):
assert parser.get_oval_tree(rule_id)
def test_use_bad_report_file():
src = 'global_test_data/xccdf_org.ssgproject.content_profile_ospp-results-initial.xml'
path = get_arf_report_path(src)
with pytest.raises(Exception, match=r"arf\b|ARF\b"):
assert ARFXMLParser(path)
| 30.896104
| 93
| 0.721732
| 307
| 2,379
| 5.13355
| 0.355049
| 0.055838
| 0.125635
| 0.174492
| 0.478426
| 0.274746
| 0.195431
| 0.195431
| 0.100254
| 0.100254
| 0
| 0.001518
| 0.169399
| 2,379
| 76
| 94
| 31.302632
| 0.796053
| 0
| 0
| 0.370968
| 0
| 0
| 0.411517
| 0.352669
| 0
| 0
| 0
| 0
| 0.064516
| 1
| 0.080645
| false
| 0.016129
| 0.048387
| 0.016129
| 0.145161
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab2a38bd32faf647f78849a772f13ad447eb6e18
| 2,144
|
py
|
Python
|
chapter_13/mailtools/__init__.py
|
bimri/programming_python
|
ba52ccd18b9b4e6c5387bf4032f381ae816b5e77
|
[
"MIT"
] | null | null | null |
chapter_13/mailtools/__init__.py
|
bimri/programming_python
|
ba52ccd18b9b4e6c5387bf4032f381ae816b5e77
|
[
"MIT"
] | null | null | null |
chapter_13/mailtools/__init__.py
|
bimri/programming_python
|
ba52ccd18b9b4e6c5387bf4032f381ae816b5e77
|
[
"MIT"
] | null | null | null |
"The mailtools Utility Package"
'Initialization File'
"""
##################################################################################
mailtools package: interface to mail server transfers, used by pymail2, PyMailGUI,
and PyMailCGI; does loads, sends, parsing, composing, and deleting, with part
attachments, encodings (of both the email and Unicdode kind), etc.; the parser,
fetcher, and sender classes here are designed to be mixed-in to subclasses which
use their methods, or used as embedded or standalone objects;
this package also includes convenience subclasses for silent mode, and more;
loads all mail text if pop server doesn't do top; doesn't handle threads or UI
here, and allows askPassword to differ per subclass; progress callback funcs get
status; all calls raise exceptions on error--client must handle in GUI/other;
this changed from file to package: nested modules imported here for bw compat;
4E: need to use package-relative import syntax throughout, because in Py 3.X
package dir in no longer on module import search path if package is imported
elsewhere (from another directory which uses this package); also performs
Unicode decoding on mail text when fetched (see mailFetcher), as well as for
some text part payloads which might have been email-encoded (see mailParser);
TBD: in saveparts, should file be opened in text mode for text/ contypes?
TBD: in walkNamedParts, should we skip oddballs like message/delivery-status?
TBD: Unicode support has not been tested exhaustively: see Chapter 13 for more
on the Py3.1 email package and its limitations, and the policies used here;
##################################################################################
"""
# collect contents of all modules here, when package dir imported directly
from .mailFetcher import *
from .mailSender import * # 4E: package-relative
from .mailParser import *
# export nested modules here, when from mailtools import *
__all__ = 'mailFetcher', 'mailSender', 'mailParser'
# self-test code is in selftest.py to allow mailconfig's path
# to be set before running thr nested module imports above
| 51.047619
| 83
| 0.718284
| 301
| 2,144
| 5.10299
| 0.584718
| 0.005208
| 0.019531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004432
| 0.158116
| 2,144
| 41
| 84
| 52.292683
| 0.846537
| 0.139459
| 0
| 0
| 0
| 0
| 0.369159
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
ab2b3845336cbc9c2cd653a367ec0d03b0cfffa6
| 223
|
py
|
Python
|
server.py
|
SDelhey/websocket-chat
|
c7b83583007a723baee25acedbceddd55c12ffec
|
[
"MIT"
] | null | null | null |
server.py
|
SDelhey/websocket-chat
|
c7b83583007a723baee25acedbceddd55c12ffec
|
[
"MIT"
] | null | null | null |
server.py
|
SDelhey/websocket-chat
|
c7b83583007a723baee25acedbceddd55c12ffec
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template
from flask_socketio import SocketIO, send, emit
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
if __name__ == '__main__':
socketio.run(app)
| 24.777778
| 47
| 0.748879
| 30
| 223
| 5.066667
| 0.533333
| 0.118421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134529
| 223
| 9
| 48
| 24.777778
| 0.787565
| 0
| 0
| 0
| 0
| 0
| 0.111607
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab32101612714ab2b6b04c378a7a5646daa96906
| 155
|
py
|
Python
|
Problem_30/main.py
|
jdalzatec/EulerProject
|
2f2f4d9c009be7fd63bb229bb437ea75db77d891
|
[
"MIT"
] | 1
|
2022-03-28T05:32:58.000Z
|
2022-03-28T05:32:58.000Z
|
Problem_30/main.py
|
jdalzatec/EulerProject
|
2f2f4d9c009be7fd63bb229bb437ea75db77d891
|
[
"MIT"
] | null | null | null |
Problem_30/main.py
|
jdalzatec/EulerProject
|
2f2f4d9c009be7fd63bb229bb437ea75db77d891
|
[
"MIT"
] | null | null | null |
total = 0
for n in range(1000, 1000000):
suma = 0
for i in str(n):
suma += int(i)**5
if (n == suma):
total += n
print(total)
| 14.090909
| 30
| 0.483871
| 26
| 155
| 2.884615
| 0.576923
| 0.106667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141414
| 0.36129
| 155
| 11
| 31
| 14.090909
| 0.616162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab37e16ef4016e52fa0dab454a286037abc7c623
| 889
|
py
|
Python
|
tests/test_tempo_event.py
|
yokaze/crest-python
|
c246b16ade6fd706f0e18aae797660064bddd555
|
[
"MIT"
] | null | null | null |
tests/test_tempo_event.py
|
yokaze/crest-python
|
c246b16ade6fd706f0e18aae797660064bddd555
|
[
"MIT"
] | null | null | null |
tests/test_tempo_event.py
|
yokaze/crest-python
|
c246b16ade6fd706f0e18aae797660064bddd555
|
[
"MIT"
] | null | null | null |
#
# test_tempo_event.py
# crest-python
#
# Copyright (C) 2017 Rue Yokaze
# Distributed under the MIT License.
#
import crest_loader
import unittest
from crest.events.meta import TempoEvent
class TestTempoEvent(unittest.TestCase):
def test_ctor(self):
TempoEvent()
TempoEvent(120)
def test_message(self):
evt = TempoEvent(120)
self.assertEqual(evt.Message, [0xFF, 0x51, 0x03, 0x07, 0xA1, 0x20])
def test_property(self):
evt = TempoEvent(120)
self.assertEqual(evt.Tempo, 120)
self.assertEqual(evt.MicroSeconds, 500000)
evt.Tempo = 60
self.assertEqual(evt.Tempo, 60)
self.assertEqual(evt.MicroSeconds, 1000000)
evt.MicroSeconds = 250000
self.assertEqual(evt.Tempo, 240)
self.assertEqual(evt.MicroSeconds, 250000)
if (__name__ == '__main__'):
unittest.main()
| 24.694444
| 75
| 0.662542
| 103
| 889
| 5.582524
| 0.475728
| 0.182609
| 0.21913
| 0.109565
| 0.224348
| 0.224348
| 0.132174
| 0
| 0
| 0
| 0
| 0.092375
| 0.232846
| 889
| 35
| 76
| 25.4
| 0.750733
| 0.115861
| 0
| 0.090909
| 0
| 0
| 0.010309
| 0
| 0
| 0
| 0.030928
| 0
| 0.318182
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab3ea1f161bcea5311f9766c4b23a51c645e6437
| 1,174
|
py
|
Python
|
startuptweet.py
|
cudmore/startupnotify
|
76b61b295ae7049e597fa05457a6696e624c4955
|
[
"MIT"
] | null | null | null |
startuptweet.py
|
cudmore/startupnotify
|
76b61b295ae7049e597fa05457a6696e624c4955
|
[
"MIT"
] | null | null | null |
startuptweet.py
|
cudmore/startupnotify
|
76b61b295ae7049e597fa05457a6696e624c4955
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
Author: Robert Cudmore
Date: 20181013
Purpose: Send a Tweet with IP and MAC address of a Raspberry Pi
Install:
pip3 install tweepy
Usage:
python3 startuptweet.py 'this is my tweet'
"""
import tweepy
import sys
import socket
import subprocess
from uuid import getnode as get_mac
from datetime import datetime
# Create variables for each key, secret, token
from my_config import hash_tag
from my_config import consumer_key
from my_config import consumer_secret
from my_config import access_token
from my_config import access_token_secret
message = ''
if len( sys.argv ) > 1:
message = sys.argv[1]
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#
thetime = datetime.now().strftime('%Y%m%d %H:%M:%S')
ip = subprocess.check_output(['hostname', '--all-ip-addresses'])
ip = ip.decode('utf-8').strip()
hostname = socket.gethostname()
mac = get_mac()
mac = hex(mac)
tweet = thetime + ' ' + hostname + ' ' + ip + ' ' + mac + ' ' + message + ' ' + hash_tag
print('tweeting:', tweet)
api.update_status(status=tweet)
| 22.576923
| 88
| 0.736797
| 176
| 1,174
| 4.784091
| 0.488636
| 0.035629
| 0.071259
| 0.106888
| 0.157957
| 0.068884
| 0
| 0
| 0
| 0
| 0
| 0.014042
| 0.150767
| 1,174
| 51
| 89
| 23.019608
| 0.830491
| 0.251278
| 0
| 0
| 0
| 0
| 0.068027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.423077
| 0
| 0.423077
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
ab42719d063612a8629ae16074131965d4bb9222
| 1,397
|
py
|
Python
|
src/ice_g2p/dictionaries.py
|
cadia-lvl/ice-g2p
|
5a6cc55f45282e8a656ea0742e2f373189c9a912
|
[
"Apache-2.0"
] | null | null | null |
src/ice_g2p/dictionaries.py
|
cadia-lvl/ice-g2p
|
5a6cc55f45282e8a656ea0742e2f373189c9a912
|
[
"Apache-2.0"
] | null | null | null |
src/ice_g2p/dictionaries.py
|
cadia-lvl/ice-g2p
|
5a6cc55f45282e8a656ea0742e2f373189c9a912
|
[
"Apache-2.0"
] | null | null | null |
import os, sys
DICTIONARY_FILE = os.path.join(sys.prefix, 'dictionaries/ice_pron_dict_standard_clear.csv')
HEAD_FILE = os.path.join(sys.prefix, 'data/head_map.csv')
MODIFIER_FILE = os.path.join(sys.prefix, 'data/modifier_map.csv')
VOWELS_FILE = os.path.join(sys.prefix, 'data/vowels_sampa.txt')
CONS_CLUSTERS_FILE = os.path.join(sys.prefix, 'data/cons_clusters_sampa.txt')
def read_map(filename):
with open(filename) as f:
file_content = f.read().splitlines()
dict_map = {}
for line in file_content:
arr = line.split('\t')
if len(arr) > 1:
values = arr[1:]
else:
values = []
key = arr[0]
dict_map[key] = values
return dict_map
def read_dictionary(filename):
with open(filename) as f:
file_content = f.read().splitlines()
pronDict = {}
for line in file_content:
word, transcr = line.split('\t')
pronDict[word] = transcr
return pronDict
def read_list(filename):
with open(filename) as f:
file_content = f.read().splitlines()
return file_content
def get_head_map():
return read_map(HEAD_FILE)
def get_modifier_map():
return read_map(MODIFIER_FILE)
def get_dictionary():
return read_dictionary(DICTIONARY_FILE)
def get_vowels():
return read_list(VOWELS_FILE)
def get_cons_clusters():
return read_list(CONS_CLUSTERS_FILE)
| 24.086207
| 91
| 0.670007
| 198
| 1,397
| 4.494949
| 0.262626
| 0.074157
| 0.05618
| 0.078652
| 0.370787
| 0.325843
| 0.3
| 0.178652
| 0.178652
| 0.178652
| 0
| 0.002727
| 0.212598
| 1,397
| 57
| 92
| 24.508772
| 0.806364
| 0
| 0
| 0.195122
| 0
| 0
| 0.097351
| 0.082319
| 0
| 0
| 0
| 0
| 0
| 1
| 0.195122
| false
| 0
| 0.02439
| 0.121951
| 0.414634
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
ab42c6179a77692e03a58e9d6335af55ec3cb46d
| 385
|
py
|
Python
|
tests/test_annotations_notebook.py
|
jeromedockes/pylabelbuddy
|
26be00db679e94117968387aa7010dab2739b517
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_annotations_notebook.py
|
jeromedockes/pylabelbuddy
|
26be00db679e94117968387aa7010dab2739b517
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_annotations_notebook.py
|
jeromedockes/pylabelbuddy
|
26be00db679e94117968387aa7010dab2739b517
|
[
"BSD-3-Clause"
] | null | null | null |
from pylabelbuddy import _annotations_notebook
def test_annotations_notebook(root, annotations_mock, dataset_mock):
nb = _annotations_notebook.AnnotationsNotebook(
root, annotations_mock, dataset_mock
)
nb.change_database()
assert nb.notebook.index(nb.notebook.select()) == 2
nb.go_to_annotations()
assert nb.notebook.index(nb.notebook.select()) == 0
| 32.083333
| 68
| 0.750649
| 46
| 385
| 6
| 0.456522
| 0.144928
| 0.137681
| 0.188406
| 0.5
| 0.5
| 0.268116
| 0
| 0
| 0
| 0
| 0.006135
| 0.153247
| 385
| 11
| 69
| 35
| 0.840491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab4374fa18ea29af4960ad145950b9d2672ecb83
| 1,257
|
py
|
Python
|
middleware/run.py
|
natedogg484/react-flask-authentication
|
5000685d35471b03f72e0b07dfbdbf6d5fc296d2
|
[
"MIT"
] | null | null | null |
middleware/run.py
|
natedogg484/react-flask-authentication
|
5000685d35471b03f72e0b07dfbdbf6d5fc296d2
|
[
"MIT"
] | 4
|
2021-03-09T21:12:06.000Z
|
2022-02-26T19:17:31.000Z
|
middleware/run.py
|
natedogg484/vue-authentication
|
ab087e238d98606ffb73167cb9a16648812ac3e5
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
app = Flask(__name__)
CORS(app)
api = Api(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'some-secret-string'
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
db = SQLAlchemy(app)
jwt = JWTManager(app)
@app.before_first_request
def create_tables():
db.create_all()
import models, resources, views
api.add_resource(resources.UserRegistration, '/registration')
api.add_resource(resources.UserLogin, '/login')
api.add_resource(resources.UserLogoutAccess, '/logout/access')
api.add_resource(resources.UserLogoutRefresh, '/logout/refresh')
api.add_resource(resources.TokenRefresh, '/token/refresh')
api.add_resource(resources.AllUsers, '/users')
api.add_resource(resources.SecretResource, '/secret')
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return models.RevokedTokenModel.is_jti_blacklisted(jti)
| 27.933333
| 64
| 0.791567
| 165
| 1,257
| 5.769697
| 0.387879
| 0.044118
| 0.102941
| 0.169118
| 0.113445
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085123
| 1,257
| 45
| 65
| 27.933333
| 0.827826
| 0
| 0
| 0
| 0
| 0
| 0.211447
| 0.079491
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.193548
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab59b426727f7713efb93b6855597da219efc0be
| 1,695
|
py
|
Python
|
examples/multimedia/mmimdb_MFM.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 148
|
2021-03-06T06:54:13.000Z
|
2022-03-29T19:27:21.000Z
|
examples/multimedia/mmimdb_MFM.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 10
|
2021-07-19T22:57:49.000Z
|
2022-02-04T03:12:29.000Z
|
examples/multimedia/mmimdb_MFM.py
|
kapikantzari/MultiBench
|
44ab6ea028682040a0c04de68239ce5cdf15123f
|
[
"MIT"
] | 18
|
2021-07-22T07:17:27.000Z
|
2022-03-27T16:11:40.000Z
|
import torch
import sys
import os
sys.path.append(os.getcwd())
from utils.helper_modules import Sequential2
from unimodals.common_models import Linear, MLP, MaxOut_MLP
from datasets.imdb.get_data import get_dataloader
from fusions.common_fusions import Concat
from objective_functions.objectives_for_supervised_learning import MFM_objective
from objective_functions.recon import sigmloss1d
from training_structures.Supervised_Learning import train, test
filename = "best_mfm.pt"
traindata, validdata, testdata = get_dataloader(
"../video/multimodal_imdb.hdf5", "../video/mmimdb", vgg=True, batch_size=128)
classes = 23
n_latent = 512
fuse = Sequential2(Concat(), MLP(2*n_latent, n_latent, n_latent//2)).cuda()
encoders = [MaxOut_MLP(512, 512, 300, n_latent, False).cuda(
), MaxOut_MLP(512, 1024, 4096, n_latent, False).cuda()]
head = Linear(n_latent//2, classes).cuda()
decoders = [MLP(n_latent, 600, 300).cuda(), MLP(n_latent, 2048, 4096).cuda()]
intermediates = [MLP(n_latent, n_latent//2, n_latent//2).cuda(),
MLP(n_latent, n_latent//2, n_latent//2).cuda()]
recon_loss = MFM_objective(2.0, [sigmloss1d, sigmloss1d], [
1.0, 1.0], criterion=torch.nn.BCEWithLogitsLoss())
train(encoders, fuse, head, traindata, validdata, 1000, decoders+intermediates, early_stop=True, task="multilabel",
objective_args_dict={"decoders": decoders, "intermediates": intermediates}, save=filename, optimtype=torch.optim.AdamW, lr=5e-3, weight_decay=0.01, objective=recon_loss)
print("Testing:")
model = torch.load(filename).cuda()
test(model, testdata, method_name="MFM", dataset="imdb",
criterion=torch.nn.BCEWithLogitsLoss(), task="multilabel")
| 42.375
| 175
| 0.746313
| 236
| 1,695
| 5.177966
| 0.427966
| 0.085925
| 0.03928
| 0.045827
| 0.061375
| 0.0491
| 0.0491
| 0.0491
| 0.0491
| 0.0491
| 0
| 0.046791
| 0.117404
| 1,695
| 39
| 176
| 43.461538
| 0.770053
| 0
| 0
| 0
| 0
| 0
| 0.065487
| 0.017109
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.322581
| 0
| 0.322581
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
ab5ff68a9733a875c0aeb19f8b19c6f3ac7260b4
| 3,108
|
py
|
Python
|
vendor/packages/logilab-astng/__pkginfo__.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T17:08:47.000Z
|
2019-10-05T11:37:02.000Z
|
vendor/packages/logilab-astng/__pkginfo__.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/packages/logilab-astng/__pkginfo__.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""
logilab.astng packaging information
"""
distname = 'logilab-astng'
modname = 'astng'
subpackage_of = 'logilab'
numversion = (0, 20, 1)
version = '.'.join([str(num) for num in numversion])
install_requires = ['logilab-common >= 0.49.0']
pyversions = ["2.3", "2.4", "2.5", '2.6']
license = 'LGPL'
author = 'Logilab'
author_email = '[email protected]'
mailinglist = "mailto://%s" % author_email
web = "http://www.logilab.org/project/%s" % distname
ftp = "ftp://ftp.logilab.org/pub/%s" % modname
short_desc = "rebuild a new abstract syntax tree from Python's ast"
long_desc = """The aim of this module is to provide a common base \
representation of python source code for projects such as pychecker, pyreverse,
pylint... Well, actually the development of this library is essentially
governed by pylint's needs.
It rebuilds the tree generated by the compiler.ast [1] module (python <= 2.4)
or by the builtin _ast module (python >= 2.5) by recursively walking down the
AST and building an extended ast (let's call it astng ;). The new node classes
have additional methods and attributes for different usages.
Furthermore, astng builds partial trees by inspecting living objects."""
from os.path import join
include_dirs = [join('test', 'regrtest_data'),
join('test', 'data'), join('test', 'data2')]
| 40.363636
| 87
| 0.740991
| 483
| 3,108
| 4.749482
| 0.403727
| 0.013078
| 0.031386
| 0.049695
| 0.442895
| 0.442895
| 0.442895
| 0.418483
| 0.385353
| 0.385353
| 0
| 0.024166
| 0.161197
| 3,108
| 76
| 88
| 40.894737
| 0.855773
| 0.557593
| 0
| 0
| 0
| 0.038462
| 0.647412
| 0.045761
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.038462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab617d4c442405b9219d3fa02f66e3a525d82e42
| 4,339
|
py
|
Python
|
bioinformatics/analysis/rnaseq/prepare/split_gtf_by_type.py
|
bioShaun/omsCabinet
|
741179a06cbd5200662cd03bc2e0115f4ad06917
|
[
"MIT"
] | null | null | null |
bioinformatics/analysis/rnaseq/prepare/split_gtf_by_type.py
|
bioShaun/omsCabinet
|
741179a06cbd5200662cd03bc2e0115f4ad06917
|
[
"MIT"
] | null | null | null |
bioinformatics/analysis/rnaseq/prepare/split_gtf_by_type.py
|
bioShaun/omsCabinet
|
741179a06cbd5200662cd03bc2e0115f4ad06917
|
[
"MIT"
] | null | null | null |
import fire
import gtfparse
from pathlib import Path
GENCODE_CATEGORY_MAP = {
'IG_C_gene': 'protein_coding',
'IG_D_gene': 'protein_coding',
'IG_J_gene': 'protein_coding',
'IG_V_gene': 'protein_coding',
'IG_LV_gene': 'protein_coding',
'TR_C_gene': 'protein_coding',
'TR_J_gene': 'protein_coding',
'TR_V_gene': 'protein_coding',
'TR_D_gene': 'protein_coding',
'TEC': 'protein_coding',
'nonsense_mediated_decay': 'protein_coding',
'non_stop_decay': 'protein_coding',
'retained_intron': 'lncRNA',
'protein_coding': 'protein_coding',
'ambiguous_orf': 'lncRNA',
'Mt_rRNA': 'ncRNA',
'Mt_tRNA': 'ncRNA',
'miRNA': 'ncRNA',
'misc_RNA': 'ncRNA',
'rRNA': 'ncRNA',
'snRNA': 'ncRNA',
'snoRNA': 'ncRNA',
'ribozyme': 'ncRNA',
'sRNA': 'ncRNA',
'scaRNA': 'ncRNA',
'scRNA': 'ncRNA',
'non_coding': 'lncRNA',
'known_ncrna': 'ncRNA',
'3prime_overlapping_ncrna': 'lncRNA',
'3prime_overlapping_ncRNA': 'lncRNA',
'vaultRNA': 'ncRNA',
'processed_transcript': 'lncRNA',
'lincRNA': 'lncRNA',
'macro_lncRNA': 'lncRNA',
'sense_intronic': 'lncRNA',
'sense_overlapping': 'lncRNA',
'antisense': 'lncRNA',
'antisense_RNA': 'lncRNA',
'bidirectional_promoter_lncRNA': 'lncRNA',
'IG_pseudogene': 'pseudogene',
'IG_D_pseudogene': 'pseudogene',
'IG_C_pseudogene': 'pseudogene',
'IG_J_pseudogene': 'pseudogene',
'IG_V_pseudogene': 'pseudogene',
'TR_V_pseudogene': 'pseudogene',
'TR_J_pseudogene': 'pseudogene',
'Mt_tRNA_pseudogene': 'pseudogene',
'tRNA_pseudogene': 'pseudogene',
'snoRNA_pseudogene': 'pseudogene',
'snRNA_pseudogene': 'pseudogene',
'scRNA_pseudogene': 'pseudogene',
'rRNA_pseudogene': 'pseudogene',
'misc_RNA_pseudogene': 'pseudogene',
'miRNA_pseudogene': 'pseudogene',
'pseudogene': 'pseudogene',
'processed_pseudogene': 'pseudogene',
'polymorphic_pseudogene': 'pseudogene',
'retrotransposed': 'pseudogene',
'transcribed_processed_pseudogene': 'pseudogene',
'transcribed_unprocessed_pseudogene': 'pseudogene',
'transcribed_unitary_pseudogene': 'pseudogene',
'translated_processed_pseudogene': 'pseudogene',
'translated_unprocessed_pseudogene': 'pseudogene',
'unitary_pseudogene': 'pseudogene',
'unprocessed_pseudogene': 'pseudogene',
'novel_lncRNA': 'lncRNA',
'TUCP': 'TUCP',
'lncRNA': 'lncRNA'
}
def simplify_gene_type(gene_type):
if gene_type in GENCODE_CATEGORY_MAP:
sim_type = GENCODE_CATEGORY_MAP.get(gene_type)
if sim_type == 'lncRNA':
sim_type = f'annotated_{sim_type}'
elif sim_type == 'ncRNA':
sim_type = f'other_{sim_type}'
else:
pass
return sim_type
else:
raise ValueError(gene_type)
def dfline2gtfline(dfline):
basic_inf = dfline[:8]
basic_inf.fillna('.', inplace=True)
basic_inf.frame = '.'
basic_inf_list = [str(each) for each in basic_inf]
basic_inf_line = '\t'.join(basic_inf_list)
attr_inf = dfline[8:]
attr_inf_list = []
for key, val in attr_inf.items():
if val:
attr_inf_list.append(f'{key} "{val}";')
attr_inf_line = ' '.join(attr_inf_list)
return f'{basic_inf_line}\t{attr_inf_line}\n'
def split_gtf(gtf, outdir, novel=False):
gtf_df = gtfparse.read_gtf(gtf)
if 'gene_type' in gtf_df.columns:
gtf_df.loc[:, 'gene_biotype'] = gtf_df.gene_type
gtf_df.drop('gene_type', axis=1, inplace=True)
elif 'gene_biotype' in gtf_df.columns:
pass
else:
gtf_df.loc[:, 'gene_biotype'] = 'protein_coding'
type_label = 'gene_biotype'
if novel:
gtf_df.loc[
:, type_label] = gtf_df.loc[:, type_label].map(
GENCODE_CATEGORY_MAP)
else:
gtf_df.loc[
:, type_label] = gtf_df.loc[:, type_label].map(
simplify_gene_type)
outdir = Path(outdir)
outdir.mkdir(parents=True, exist_ok=True)
for gt, grp in gtf_df.groupby(type_label):
gt_file = outdir / f'{gt}.gtf'
with open(gt_file, 'w') as gt_inf:
for idx in grp.index:
outline = dfline2gtfline(grp.loc[idx])
gt_inf.write(outline)
if __name__ == '__main__':
fire.Fire(split_gtf)
| 30.77305
| 59
| 0.63563
| 506
| 4,339
| 5.086957
| 0.264822
| 0.20202
| 0.059441
| 0.029526
| 0.043512
| 0.028749
| 0.028749
| 0.028749
| 0.028749
| 0.028749
| 0
| 0.002061
| 0.217101
| 4,339
| 140
| 60
| 30.992857
| 0.755667
| 0
| 0
| 0.079365
| 0
| 0
| 0.403319
| 0.078129
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0.015873
| 0.02381
| 0
| 0.063492
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
ab6209870d287fc20132452f64da2ca39e9ab140
| 1,890
|
py
|
Python
|
cities_light/tests/test_import.py
|
jsandovalc/django-cities-light
|
a1c6af08938b7b01d4e12555bd4cb5040905603d
|
[
"MIT"
] | null | null | null |
cities_light/tests/test_import.py
|
jsandovalc/django-cities-light
|
a1c6af08938b7b01d4e12555bd4cb5040905603d
|
[
"MIT"
] | null | null | null |
cities_light/tests/test_import.py
|
jsandovalc/django-cities-light
|
a1c6af08938b7b01d4e12555bd4cb5040905603d
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import glob
import os
from dbdiff.fixture import Fixture
from .base import TestImportBase, FixtureDir
from ..settings import DATA_DIR
class TestImport(TestImportBase):
"""Load test."""
def test_single_city(self):
"""Load single city."""
fixture_dir = FixtureDir('import')
self.import_data(
fixture_dir,
'angouleme_country',
'angouleme_region',
'angouleme_subregion',
'angouleme_city',
'angouleme_translations'
)
Fixture(fixture_dir.get_file_path('angouleme.json')).assertNoDiff()
def test_single_city_zip(self):
"""Load single city."""
filelist = glob.glob(os.path.join(DATA_DIR, "angouleme_*.txt"))
for f in filelist:
os.remove(f)
fixture_dir = FixtureDir('import_zip')
self.import_data(
fixture_dir,
'angouleme_country',
'angouleme_region',
'angouleme_subregion',
'angouleme_city',
'angouleme_translations',
file_type="zip"
)
Fixture(FixtureDir('import').get_file_path('angouleme.json')).assertNoDiff()
def test_city_wrong_timezone(self):
"""Load single city with wrong timezone."""
fixture_dir = FixtureDir('import')
self.import_data(
fixture_dir,
'angouleme_country',
'angouleme_region',
'angouleme_subregion',
'angouleme_city_wtz',
'angouleme_translations'
)
Fixture(fixture_dir.get_file_path('angouleme_wtz.json')).assertNoDiff()
from ..loading import get_cities_model
city_model = get_cities_model('City')
cities = city_model.objects.all()
for city in cities:
print(city.get_timezone_info().zone)
| 29.53125
| 84
| 0.607937
| 192
| 1,890
| 5.671875
| 0.28125
| 0.073462
| 0.038567
| 0.049587
| 0.471074
| 0.471074
| 0.471074
| 0.471074
| 0.410468
| 0.323232
| 0
| 0
| 0.291005
| 1,890
| 63
| 85
| 30
| 0.812687
| 0.044444
| 0
| 0.428571
| 0
| 0
| 0.204036
| 0.036996
| 0
| 0
| 0
| 0
| 0.061224
| 1
| 0.061224
| false
| 0
| 0.306122
| 0
| 0.387755
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
db4545f1a4dfa83103a39912add856795ff6a347
| 813
|
py
|
Python
|
core/tests/test_base_time_range_controller.py
|
One-Green/plant-keeper-master
|
67101a4cc7070d26fd1685631a710ae9a60fc5e8
|
[
"CC0-1.0"
] | 2
|
2022-02-04T17:52:38.000Z
|
2022-02-04T17:52:40.000Z
|
core/tests/test_base_time_range_controller.py
|
shanisma/plant-keeper
|
3ca92ae2d55544a301e1398496a08a45cca6d15b
|
[
"CC0-1.0"
] | 4
|
2021-06-16T20:01:50.000Z
|
2022-03-09T20:17:53.000Z
|
core/tests/test_base_time_range_controller.py
|
shanisma/plant-keeper
|
3ca92ae2d55544a301e1398496a08a45cca6d15b
|
[
"CC0-1.0"
] | 1
|
2021-06-27T10:45:36.000Z
|
2021-06-27T10:45:36.000Z
|
import os
import sys
from datetime import time
import unittest
sys.path.append(
os.path.dirname(
os.path.dirname(os.path.join("..", "..", "..", os.path.dirname("__file__")))
)
)
from core.controller import BaseTimeRangeController
class TestTimeRangeController(unittest.TestCase):
def test_time_range(self):
start_at = time(10, 0, 0)
end_at = time(12, 0, 0)
time_range_controller = BaseTimeRangeController(start_at, end_at)
time_now = time(11, 0, 0)
time_range_controller.set_current_time(time_now)
self.assertTrue(time_range_controller.action)
time_now = time(12, 15, 0)
time_range_controller.set_current_time(time_now)
self.assertFalse(time_range_controller.action)
if __name__ == "__main__":
unittest.main()
| 26.225806
| 84
| 0.688807
| 105
| 813
| 4.990476
| 0.371429
| 0.103053
| 0.181298
| 0.114504
| 0.274809
| 0.171756
| 0.171756
| 0.171756
| 0.171756
| 0.171756
| 0
| 0.026114
| 0.199262
| 813
| 30
| 85
| 27.1
| 0.778802
| 0
| 0
| 0.086957
| 0
| 0
| 0.02706
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 1
| 0.043478
| false
| 0
| 0.217391
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
db4d954b047874012d94933f5000302aa9b31037
| 1,500
|
py
|
Python
|
TSFpy/debug/sample_fibonacci.py
|
ooblog/TSF1KEV
|
f7d4b4ff88f52ba00b46eb53ed98f8ea62ec2f6d
|
[
"MIT"
] | null | null | null |
TSFpy/debug/sample_fibonacci.py
|
ooblog/TSF1KEV
|
f7d4b4ff88f52ba00b46eb53ed98f8ea62ec2f6d
|
[
"MIT"
] | null | null | null |
TSFpy/debug/sample_fibonacci.py
|
ooblog/TSF1KEV
|
f7d4b4ff88f52ba00b46eb53ed98f8ea62ec2f6d
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import division,print_function,absolute_import,unicode_literals
import sys
import os
os.chdir(sys.path[0])
sys.path.append('/mnt/sda2/github/TSF1KEV/TSFpy')
from TSF_io import *
#from TSF_Forth import *
from TSF_shuffle import *
from TSF_match import *
from TSF_calc import *
from TSF_time import *
TSF_Forth_init(TSF_io_argvs(),[TSF_shuffle_Initwords,TSF_match_Initwords,TSF_calc_Initwords,TSF_time_Initwords])
TSF_Forth_setTSF("TSF_Tab-Separated-Forth:",
"\t".join(["UTF-8","#TSF_encoding","200","#TSF_calcPR","N-Fibonacci:","#TSF_this","0","#TSF_fin."]),
TSF_style="T")
TSF_Forth_setTSF("N-Fibonacci:",
"\t".join(["TSF_argvs:","#TSF_cloneargvs","TSF_argvs:","#TSF_lenthe","[0]Z[Fibcount:0]~[TSF_argvs:0]","#TSF_calcDC","Fibcount:","0","#TSF_pokethe","Fibonacci:","#TSF_this"]),
TSF_style="T")
TSF_Forth_setTSF("Fibonacci:",
"\t".join(["[Fibcount:1]Z1~[Fibcount:1]","#TSF_calcDC","((2&(([0]+3)*[0]+2)^)/((2&(2*[0]+2)^)-(2&([0]+1)^)-1)\\1)#(2&([0]+1)^)","#TSF_calcDC","1","#TSF_echoN","[Fibcount:1]+1","#TSF_calcDC","Fibcount:","1","#TSF_pokethe","Fibjump:","[Fibcount:0]-([Fibcount:1]+1)o0~1","#TSF_calcDC","#TSF_peekthe","#TSF_this"]),
TSF_style="T")
TSF_Forth_setTSF("Fibcount:",
"\t".join(["20","-1"]),
TSF_style="T")
TSF_Forth_setTSF("Fibjump:",
"\t".join(["Fibonacci:","#exit"]),
TSF_style="T")
TSF_Forth_addfin(TSF_io_argvs())
TSF_Forth_argvsleftcut(TSF_io_argvs(),1)
TSF_Forth_run()
| 39.473684
| 315
| 0.675333
| 235
| 1,500
| 4.008511
| 0.293617
| 0.084926
| 0.069002
| 0.063694
| 0.130573
| 0.112527
| 0.063694
| 0.063694
| 0
| 0
| 0
| 0.032999
| 0.070667
| 1,500
| 37
| 316
| 40.540541
| 0.642755
| 0.044
| 0
| 0.172414
| 0
| 0.034483
| 0.397203
| 0.14965
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.275862
| 0
| 0.275862
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
db66779a2882ba639d36d1d562ab73945afc92fc
| 1,317
|
py
|
Python
|
examples/rrbot_p2p_low_energy.py
|
abcamiletto/urdf2optcontrol
|
39b3f761a4685cc7d50b48793b6b2906c89b1694
|
[
"MIT"
] | null | null | null |
examples/rrbot_p2p_low_energy.py
|
abcamiletto/urdf2optcontrol
|
39b3f761a4685cc7d50b48793b6b2906c89b1694
|
[
"MIT"
] | null | null | null |
examples/rrbot_p2p_low_energy.py
|
abcamiletto/urdf2optcontrol
|
39b3f761a4685cc7d50b48793b6b2906c89b1694
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from urdf2optcontrol import optimizer
from matplotlib import pyplot as plt
import pathlib
# URDF options
urdf_path = pathlib.Path(__file__).parent.joinpath('urdf', 'rrbot.urdf').absolute()
root = "link1"
end = "link3"
in_cond = [0] * 4
def my_cost_func(q, qd, qdd, ee_pos, u, t):
return u.T @ u
def my_constraint1(q, qd, qdd, ee_pos, u, t):
return [-30, -30], u, [30, 30]
def my_constraint2(q, qd, qdd, ee_pos, u, t):
return [-4, -4], qd, [4, 4]
my_constraints = [my_constraint1, my_constraint2]
def my_final_constraint1(q, qd, qdd, ee_pos, u):
return [3.14 / 2, 0], q, [3.14 / 2, 0]
def my_final_constraint2(q, qd, qdd, ee_pos, u):
return [0, 0], qd, [0, 0]
my_final_constraints = [my_final_constraint1, my_final_constraint2]
time_horizon = 2.0
steps = 40
# Load the urdf and calculate the differential equations
optimizer.load_robot(urdf_path, root, end)
# Loading the problem conditions
optimizer.load_problem(
my_cost_func,
steps,
in_cond,
time_horizon=time_horizon,
constraints=my_constraints,
final_constraints=my_final_constraints,
max_iter=500
)
# Solving the non linear problem
res = optimizer.solve()
print('u = ', res['u'][0])
print('q = ', res['q'][0])
# Print the results!
fig = optimizer.plot_result(show=True)
| 21.241935
| 83
| 0.688686
| 211
| 1,317
| 4.094787
| 0.369668
| 0.048611
| 0.034722
| 0.046296
| 0.158565
| 0.158565
| 0.158565
| 0.065972
| 0
| 0
| 0
| 0.045078
| 0.174639
| 1,317
| 61
| 84
| 21.590164
| 0.74977
| 0.129081
| 0
| 0
| 0
| 0
| 0.029798
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.085714
| 0.142857
| 0.371429
| 0.057143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
db7042284fa2b7f2b0d11816372b28c2a0aa4dd3
| 1,755
|
py
|
Python
|
__dm__.py
|
AbhilashDatta/InstagramBot
|
21916fcfc621ae3185df8494b12aa35743c165f8
|
[
"MIT"
] | 12
|
2021-07-17T09:19:07.000Z
|
2022-01-18T18:49:43.000Z
|
__dm__.py
|
kumarankm/InstagramBot
|
db08f0ae12f22b76d31f844a9ff7f037622e534f
|
[
"MIT"
] | 1
|
2021-08-12T22:04:07.000Z
|
2021-08-13T14:14:10.000Z
|
__dm__.py
|
kumarankm/InstagramBot
|
db08f0ae12f22b76d31f844a9ff7f037622e534f
|
[
"MIT"
] | 8
|
2021-07-17T09:19:19.000Z
|
2021-09-13T19:15:04.000Z
|
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
def Dm(driver,user,message):
''' This function is used to direct message a single user/group '''
driver.get('https://www.instagram.com/direct/inbox/')
send_message_button = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div/div[3]/div/button'))).click()
search_user = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[1]/div/div[2]/input')))
search_user.send_keys(user)
selector = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[2]/div/div/div[3]/button/span'))).click()
next_button = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[1]/div/div[2]/div/button/div'))).click()
try:
text = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')))
text.send_keys(message)
send = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button'))).click()
driver.get('https://www.instagram.com/direct/inbox/')
except:
print('No message sent to '+user)
driver.get('https://www.instagram.com/direct/inbox/')
| 56.612903
| 193
| 0.699145
| 277
| 1,755
| 4.33574
| 0.249097
| 0.129892
| 0.081599
| 0.09159
| 0.576187
| 0.560366
| 0.560366
| 0.545379
| 0.439634
| 0.439634
| 0
| 0.023121
| 0.112821
| 1,755
| 31
| 194
| 56.612903
| 0.748234
| 0.033618
| 0
| 0.142857
| 0
| 0.285714
| 0.346359
| 0.265838
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.285714
| 0
| 0.333333
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
db7052a530fb46c3cf9935b4a0d738b78df5d9c6
| 11,060
|
py
|
Python
|
mashov.py
|
Yotamefr/BeitBiram
|
84bd6abddf6ac865b502e0692561ee48d510ef7c
|
[
"MIT"
] | 1
|
2020-12-31T07:32:28.000Z
|
2020-12-31T07:32:28.000Z
|
mashov.py
|
Yotamefr/BeitBiram
|
84bd6abddf6ac865b502e0692561ee48d510ef7c
|
[
"MIT"
] | null | null | null |
mashov.py
|
Yotamefr/BeitBiram
|
84bd6abddf6ac865b502e0692561ee48d510ef7c
|
[
"MIT"
] | null | null | null |
import requests
from datetime import datetime
import json
from extras import Day, Lesson
class PasswordError(Exception):
pass
class LoginFailed(Exception):
pass
class MashovAPI:
"""
MashovAPI
Originally made by Xiddoc. Project can be found here: https://github.com/Xiddoc/MashovAPI
Modifications were made by me, Yotamefr.
"""
def __init__(self, username, **kwargs):
"""
Parameters
------------
username -> Represents the username
------------
There are some weird stuff here. I might clean it in a while
Again, this code wasn't made by me, just modified by me
"""
self.url = "https://web.mashov.info/api/{}/"
self.session = requests.Session()
self.session.headers.update({'Accept': 'application/json, text/plain, */*',
'Referer': 'https://web.mashov.info/students/login',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36',
'Content-Type': 'application/json'})
self.username = username
self.auth_ID = 0
self.user_ID = self.auth_ID
self.uid = self.auth_ID
self.uID = self.auth_ID
self.guid = self.auth_ID
self.guID = self.auth_ID
self.school_site = ""
self.moodle_site = ""
self.school_name = ""
self.last_name = ""
self.first_name = ""
self.class_name = ""
self.last_pass = ""
self.last_login = ""
self.school_years = []
self.csrf_token = ""
self.user_children = {}
# Kwargs password
if "password" in kwargs:
self.password = kwargs["password"]
else:
self.password = False
# Kwargs schoolData
if "schoolData" in kwargs:
self.school_data = kwargs["schoolData"]
else:
self.school_data = False
# Kwargs schoolID
if "schoolID" in kwargs:
self.school_ID = kwargs["schoolID"]
elif not self.school_data:
self.school_data = self.get_schools()
self.school_ID = self.get_school_ID_by_name(kwargs["schoolName"])
self.current_year = datetime.now().year + 1
def login(self):
"""
Parameters
------------
------------
"""
if not self.password:
raise PasswordError("No password entered.")
self.login_data = {'semel': self.school_ID,
'username': self.username,
'password': self.password,
'year': self.current_year}
self.ret_data = self.send("login", "post", self.login_data)
self.ret_text = json.loads(self.ret_data.text)
if not self.ret_data.status_code == 200:
self.is_logged_in = False
raise LoginFailed()
self.is_logged_in = True
self.auth_ID = self.ret_text["credential"]["userId"]
self.user_ID = self.auth_ID
self.uid = self.auth_ID
self.uID = self.auth_ID
self.guid = self.auth_ID
self.guID = self.auth_ID
self.school_site = self.ret_text["accessToken"]["schoolOptions"]["schoolSite"]
self.moodle_site = self.ret_text["accessToken"]["schoolOptions"]["moodleSite"]
self.school_name = self.ret_text["accessToken"]["schoolOptions"]["schoolName"]
self.last_name = self.ret_text["accessToken"]["children"][0]["familyName"]
self.first_name = self.ret_text["accessToken"]["children"][0]["privateName"]
self.class_name = f'{self.ret_text["accessToken"]["children"][0]["classNum"]}{self.ret_text["accessToken"]["children"][0]["classCode"]}'
self.last_pass = self.ret_text["accessToken"]["lastPassSet"]
self.last_login = self.ret_text["accessToken"]["lastLogin"]
self.school_years = self.ret_text["accessToken"]["userSchoolYears"]
self.csrf_token = self.ret_data.cookies["Csrf-Token"]
self.session.headers.update({"x-csrf-token": self.csrf_token})
self.user_children = self.ret_text["accessToken"]["children"]
del self.username
del self.password
@property
def timetable(self):
return self.form_return(self.send(f"students/{self.user_ID}/timetable", "get"))
def update_school_data(self):
"""
Parameters
------------
------------
"""
self.school_data = self.form_return(self.send("schools", "get"))
def get_schools(self):
"""
Parameters
------------
------------
"""
self.update_school_data()
return self.school_data()
def get_school_ID_by_name(self, school):
"""
Parameters
------------
school -> Represents the school name
------------
"""
if self.school_data:
schoolData = self.school_data
else:
schoolData = self.update_school_data()
for schools in schoolData:
if schools["name"].find(school) == 0:
return schools["semel"]
def clear_session(self):
"""
Parameters
------------
------------
"""
return self.form_return(self.send("clearSession", "get"))
def get_special_lessons(self):
"""
Parameters
------------
------------
"""
return self.get_private_lessons()
def get_private_lessons(self):
"""
Parameters
------------
------------
"""
return self.form_return(self.send("students/{}/specialHoursLessons".format(self.auth_ID), "get"))
def get_private_lesson_types(self):
"""
Parameters
------------
------------
"""
return self.form_return(self.send("lessonsTypes", "get"))
@property
def classes(self):
return self.groups
@property
def groups(self):
return self.form_return(self.send("students/{}/groups".format(self.auth_ID), "get"))
@property
def teachers(self):
recipents = self.recipents
teachers = []
for i in recipents:
if "הורים/" not in i["displayName"]:
teachers.append(i)
return teachers
@property
def recipents(self):
return self.form_return(self.send("mail/recipients", "get"))
def form_return(self, response):
"""
Parameters
------------
response -> Represents the response from the website
------------
"""
if response.status_code != 200:
return False
else:
try:
return json.loads(response.text)
except:
return response.text
def send(self, url, method="get", params={}, files={}):
"""
Parameters
------------
url -> Represents the url to go to
method -> Represents the method to use. Can be either `get` or `post`
params -> Represents the parameters to send to the website. Only use it on `post`
files -> Pretty much the same as for the params
------------
"""
return getattr(self.session, str(method).strip().lower())(self.url.format(url), data=json.dumps(params),
files=files)
def __str__(self):
return json.dumps({
"MashovAPI": {
"url": self.url,
"sessionH": dict(self.session.headers),
"sessionC": self.session.cookies.get_dict(),
"username": self.username,
"password": self.password,
"schoolData": self.school_data,
"schoolID": self.school_ID,
"currentYear": self.current_year,
"loginData": self.login_data,
"isLoggedIn": self.is_logged_in,
"authID": self.auth_ID,
"userID": self.user_ID,
"uid": self.uid,
"uID": self.uID,
"guid": self.guid,
"guID": self.guID,
"schoolSite": self.school_site,
"moodleSite": self.moodle_site,
"schoolName": self.school_name,
"lastName": self.last_name,
"firstName": self.first_name,
"className": self.class_name,
"lastPass": self.last_pass,
"lastLogin": self.last_login,
"schoolYears": self.school_years,
"csrfToken": self.csrf_token,
"userChildren": self.user_children
}})
def get_day(self, day_num: int):
"""
Parameters
------------
day -> Represents the day number
------------
"""
day = []
timetable = []
for i in self.timetable:
if i["timeTable"]["day"] == day_num:
timetable.append(i)
for i in range(len(timetable)):
for j in range(i+1, len(timetable), 1):
if timetable[i]["timeTable"]["lesson"] > timetable[j]["timeTable"]["lesson"]:
temp = timetable[i]
timetable[i] = timetable[j]
timetable[j] = temp
for i in timetable:
if not "קפ'" in i["groupDetails"]["subjectName"]: # We don't need that. It's useless.
if len(day) > 0:
while i["timeTable"]["lesson"] > day[-1].number + 1:
day.append(Lesson(
lesson="",
lesson_number=day[-1].number + 1,
lesson_time="",
classroom="",
teacher="",
)
)
i["groupDetails"]["groupTeachers"][0]["teacherName"] = i["groupDetails"]["groupTeachers"][0]["teacherName"].replace("-", " ")
day.append(Lesson(
lesson=i["groupDetails"]["subjectName"],
lesson_number=i["timeTable"]["lesson"],
lesson_time="",
classroom=i["timeTable"]["roomNum"],
teacher=i["groupDetails"]["groupTeachers"][0]["teacherName"]
)
)
return Day(day_num, day)
def get_today(self):
"""
Parameters
------------
------------
"""
today = datetime.now().weekday()
today += 2
if today > 7:
today -= 7
return self.get_day(today)
| 34.88959
| 168
| 0.499458
| 1,082
| 11,060
| 4.966728
| 0.224584
| 0.042799
| 0.027912
| 0.028656
| 0.208969
| 0.164496
| 0.097507
| 0.06364
| 0.040194
| 0.040194
| 0
| 0.007595
| 0.357143
| 11,060
| 316
| 169
| 35
| 0.748242
| 0.110579
| 0
| 0.122549
| 0
| 0.009804
| 0.155264
| 0.019408
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093137
| false
| 0.068627
| 0.019608
| 0.02451
| 0.215686
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
db7edea364132ddeeca859f58229a42b6ea2f0ae
| 534
|
py
|
Python
|
config/settings/local.py
|
vyshakTs/STORE_MANAGEMENT_SYSTEM
|
b6b82a02c0b512083c35a8656e191436552569a9
|
[
"CC0-1.0"
] | null | null | null |
config/settings/local.py
|
vyshakTs/STORE_MANAGEMENT_SYSTEM
|
b6b82a02c0b512083c35a8656e191436552569a9
|
[
"CC0-1.0"
] | null | null | null |
config/settings/local.py
|
vyshakTs/STORE_MANAGEMENT_SYSTEM
|
b6b82a02c0b512083c35a8656e191436552569a9
|
[
"CC0-1.0"
] | null | null | null |
from .base import *
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'SMS',
'USER': 'postgres',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': '',
}
}
INSTALLED_APPS += [
'debug_toolbar.apps.DebugToolbarConfig',
'django_extensions',
]
ALLOWED_HOSTS += ['.herokuapp.com']
# Loads SECRET_KEY from .env file
# SECRET_KEY = get_env_variable('SECRET_KEY')
| 19.777778
| 64
| 0.617978
| 54
| 534
| 5.925926
| 0.777778
| 0.084375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.220974
| 534
| 26
| 65
| 20.538462
| 0.769231
| 0.140449
| 0
| 0
| 0
| 0
| 0.45614
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.055556
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
db8615ff95bbb42756435769fd0cc3b6f45c202c
| 503
|
py
|
Python
|
day-2/part_b.py
|
yuetsin/AoC
|
a7c5aea245ee6e77312352907fc4d1ac8eac2d3a
|
[
"CC0-1.0"
] | null | null | null |
day-2/part_b.py
|
yuetsin/AoC
|
a7c5aea245ee6e77312352907fc4d1ac8eac2d3a
|
[
"CC0-1.0"
] | null | null | null |
day-2/part_b.py
|
yuetsin/AoC
|
a7c5aea245ee6e77312352907fc4d1ac8eac2d3a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
import re
def get_input() -> list:
with open('./input', 'r') as f:
return [v for v in [v.strip() for v in f.readlines()] if v]
lines = get_input()
count = 0
for line in lines:
lower, upper, char, password = re.split(r'-|: | ', line)
lower, upper = int(lower) - 1, int(upper) - 1
try:
if (password[lower] == char) ^ (password[upper] == char):
count += 1
except:
# don't care about boundaries
pass
print(count)
| 20.12
| 67
| 0.554672
| 74
| 503
| 3.743243
| 0.567568
| 0.057762
| 0.043321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013928
| 0.286282
| 503
| 24
| 68
| 20.958333
| 0.75766
| 0.097416
| 0
| 0
| 0
| 0
| 0.030973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0.2
| 0.066667
| 0
| 0.2
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
db8707b6679e39765f15056eb4cf61c517a7c762
| 9,435
|
py
|
Python
|
hcloud/servers/domain.py
|
usmannasir/hcloud-python
|
2a90551fb1c4d9d8a6aea5d8b6601a7c1360494d
|
[
"MIT"
] | 1
|
2019-10-23T01:00:08.000Z
|
2019-10-23T01:00:08.000Z
|
hcloud/servers/domain.py
|
usmannasir/hcloud-python
|
2a90551fb1c4d9d8a6aea5d8b6601a7c1360494d
|
[
"MIT"
] | null | null | null |
hcloud/servers/domain.py
|
usmannasir/hcloud-python
|
2a90551fb1c4d9d8a6aea5d8b6601a7c1360494d
|
[
"MIT"
] | 1
|
2019-06-19T17:53:10.000Z
|
2019-06-19T17:53:10.000Z
|
# -*- coding: utf-8 -*-
from hcloud.core.domain import BaseDomain
from hcloud.helpers.descriptors import ISODateTime
class Server(BaseDomain):
"""Server Domain
:param id: int
ID of the server
:param name: str
Name of the server (must be unique per project and a valid hostname as per RFC 1123)
:param status: str
Status of the server Choices: `running`, `initializing`, `starting`, `stopping`, `off`, `deleting`, `migrating`, `rebuilding`, `unknown`
:param created: datetime
Point in time when the server was created
:param public_net: :class:`PublicNetwork <hcloud.servers.domain.PublicNetwork>`
Public network information.
:param server_type: :class:`BoundServerType <hcloud.server_types.client.BoundServerType>`
:param datacenter: :class:`BoundDatacenter <hcloud.datacenters.client.BoundDatacenter>`
:param image: :class:`BoundImage <hcloud.images.client.BoundImage>`, None
:param iso: :class:`BoundIso <hcloud.isos.client.BoundIso>`, None
:param rescue_enabled: bool
True if rescue mode is enabled: Server will then boot into rescue system on next reboot.
:param locked: bool
True if server has been locked and is not available to user.
:param backup_window: str, None
Time window (UTC) in which the backup will run, or None if the backups are not enabled
:param outgoing_traffic: int, None
Outbound Traffic for the current billing period in bytes
:param ingoing_traffic: int, None
Inbound Traffic for the current billing period in bytes
:param included_traffic: int
Free Traffic for the current billing period in bytes
:param protection: dict
Protection configuration for the server
:param labels: dict
User-defined labels (key-value pairs)
:param volumes: List[:class:`BoundVolume <hcloud.volumes.client.BoundVolume>`]
Volumes assigned to this server.
"""
STATUS_RUNNING = "running"
"""Server Status running"""
STATUS_INIT = "initializing"
"""Server Status initializing"""
STATUS_STARTING = "starting"
"""Server Status starting"""
STATUS_STOPPING = "stopping"
"""Server Status stopping"""
STATUS_OFF = "off"
"""Server Status off"""
STATUS_DELETING = "deleting"
"""Server Status deleting"""
STATUS_MIGRATING = "migrating"
"""Server Status migrating"""
STATUS_REBUILDING = "rebuilding"
"""Server Status rebuilding"""
STATUS_UNKNOWN = "unknown"
"""Server Status unknown"""
__slots__ = (
"id",
"name",
"status",
"public_net",
"server_type",
"datacenter",
"image",
"iso",
"rescue_enabled",
"locked",
"backup_window",
"outgoing_traffic",
"ingoing_traffic",
"included_traffic",
"protection",
"labels",
"volumes",
)
created = ISODateTime()
supported_fields = ("created",)
def __init__(
self,
id,
name=None,
status=None,
created=None,
public_net=None,
server_type=None,
datacenter=None,
image=None,
iso=None,
rescue_enabled=None,
locked=None,
backup_window=None,
outgoing_traffic=None,
ingoing_traffic=None,
included_traffic=None,
protection=None,
labels=None,
volumes=None,
):
self.id = id
self.name = name
self.status = status
self.created = created
self.public_net = public_net
self.server_type = server_type
self.datacenter = datacenter
self.image = image
self.iso = iso
self.rescue_enabled = rescue_enabled
self.locked = locked
self.backup_window = backup_window
self.outgoing_traffic = outgoing_traffic
self.ingoing_traffic = ingoing_traffic
self.included_traffic = included_traffic
self.protection = protection
self.labels = labels
self.volumes = volumes
class CreateServerResponse(BaseDomain):
"""Create Server Response Domain
:param action: :class:`BoundServer <hcloud.servers.client.BoundServer>`
The created server
:param action: :class:`BoundAction <hcloud.actions.client.BoundAction>`
Shows the progress of the server creation
:param next_actions: List[:class:`BoundAction <hcloud.actions.client.BoundAction>`]
Additional actions like a `start_server` action after the server creation
:param root_password: str, None
The root password of the server if no SSH-Key was given on server creation
"""
__slots__ = (
"server",
"action",
"next_actions",
"root_password"
)
def __init__(
self,
server, # type: BoundServer
action, # type: BoundAction
next_actions, # type: List[Action]
root_password # type: str
):
self.server = server
self.action = action
self.next_actions = next_actions
self.root_password = root_password
class ResetPasswordResponse(BaseDomain):
"""Reset Password Response Domain
:param action: :class:`BoundAction <hcloud.actions.client.BoundAction>`
Shows the progress of the server passwort reset action
:param root_password: str
The root password of the server
"""
__slots__ = (
"action",
"root_password"
)
def __init__(
self,
action, # type: BoundAction
root_password # type: str
):
self.action = action
self.root_password = root_password
class EnableRescueResponse(BaseDomain):
"""Enable Rescue Response Domain
:param action: :class:`BoundAction <hcloud.actions.client.BoundAction>`
Shows the progress of the server enable rescue action
:param root_password: str
The root password of the server in the rescue mode
"""
__slots__ = (
"action",
"root_password"
)
def __init__(
self,
action, # type: BoundAction
root_password # type: str
):
self.action = action
self.root_password = root_password
class RequestConsoleResponse(BaseDomain):
"""Request Console Response Domain
:param action: :class:`BoundAction <hcloud.actions.client.BoundAction>`
Shows the progress of the server request console action
:param wss_url: str
URL of websocket proxy to use. This includes a token which is valid for a limited time only.
:param password: str
VNC password to use for this connection. This password only works in combination with a wss_url with valid token.
"""
__slots__ = (
"action",
"wss_url",
"password"
)
def __init__(
self,
action, # type: BoundAction
wss_url, # type: str
password, # type: str
):
self.action = action
self.wss_url = wss_url
self.password = password
class PublicNetwork(BaseDomain):
"""Public Network Domain
:param ipv4: :class:`IPv4Address <hcloud.servers.domain.IPv4Address>`
:param ipv6: :class:`IPv6Network <hcloud.servers.domain.IPv6Network>`
:param floating_ips: List[:class:`BoundFloatingIP <hcloud.floating_ips.client.BoundFloatingIP>`]
"""
__slots__ = (
"ipv4",
"ipv6",
"floating_ips"
)
def __init__(self,
ipv4, # type: IPv4Address
ipv6, # type: IPv6Network
floating_ips, # type: List[BoundFloatingIP]
):
self.ipv4 = ipv4
self.ipv6 = ipv6
self.floating_ips = floating_ips
class IPv4Address(BaseDomain):
"""IPv4 Address Domain
:param ip: str
The IPv4 Address
:param blocked: bool
Determine if the IP is blocked
:param dns_ptr: str
DNS PTR for the ip
"""
__slots__ = (
"ip",
"blocked",
"dns_ptr"
)
def __init__(self,
ip, # type: str
blocked, # type: bool
dns_ptr, # type: str
):
self.ip = ip
self.blocked = blocked
self.dns_ptr = dns_ptr
class IPv6Network(BaseDomain):
"""IPv6 Network Domain
:param ip: str
The IPv6 Network as CIDR Notation
:param blocked: bool
Determine if the Network is blocked
:param dns_ptr: dict
DNS PTR Records for the Network as Dict
:param network: str
The network without the network mask
:param network_mask: str
The network mask
"""
__slots__ = (
"ip",
"blocked",
"dns_ptr",
"network",
"network_mask"
)
def __init__(self,
ip, # type: str
blocked, # type: bool
dns_ptr, # type: list
):
self.ip = ip
self.blocked = blocked
self.dns_ptr = dns_ptr
ip_parts = self.ip.split("/") # 2001:db8::/64 to 2001:db8:: and 64
self.network = ip_parts[0]
self.network_mask = ip_parts[1]
| 30.337621
| 147
| 0.598728
| 1,008
| 9,435
| 5.446429
| 0.196429
| 0.039344
| 0.020036
| 0.026412
| 0.267942
| 0.232605
| 0.20255
| 0.188889
| 0.188889
| 0.164299
| 0
| 0.006655
| 0.315209
| 9,435
| 310
| 148
| 30.435484
| 0.843058
| 0.449497
| 0
| 0.375
| 0
| 0
| 0.088171
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0.073864
| 0.011364
| 0
| 0.210227
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
db8a89f5042414f5dbf4f47067a5e2131c5f76b8
| 1,881
|
py
|
Python
|
dlk/core/schedulers/__init__.py
|
cstsunfu/dlkit
|
69e0efd372fa5c0ae5313124d0ba1ef55b535196
|
[
"Apache-2.0"
] | null | null | null |
dlk/core/schedulers/__init__.py
|
cstsunfu/dlkit
|
69e0efd372fa5c0ae5313124d0ba1ef55b535196
|
[
"Apache-2.0"
] | null | null | null |
dlk/core/schedulers/__init__.py
|
cstsunfu/dlkit
|
69e0efd372fa5c0ae5313124d0ba1ef55b535196
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 cstsunfu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""schedulers"""
import importlib
import os
from dlk.utils.register import Register
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
import math
scheduler_config_register = Register("Schedule config register.")
scheduler_register = Register("Schedule register.")
class BaseScheduler(object):
"""interface for Schedule"""
def get_scheduler(self)->LambdaLR:
"""return the initialized scheduler
Returns:
Schedule
"""
raise NotImplementedError
def __call__(self):
"""the same as self.get_scheduler()
"""
return self.get_scheduler()
def import_schedulers(schedulers_dir, namespace):
for file in os.listdir(schedulers_dir):
path = os.path.join(schedulers_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
scheduler_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + scheduler_name)
# automatically import any Python files in the schedulers directory
schedulers_dir = os.path.dirname(__file__)
import_schedulers(schedulers_dir, "dlk.core.schedulers")
| 30.836066
| 87
| 0.701223
| 238
| 1,881
| 5.432773
| 0.487395
| 0.046404
| 0.020108
| 0.024749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005355
| 0.205742
| 1,881
| 60
| 88
| 31.35
| 0.860107
| 0.407762
| 0
| 0
| 0
| 0
| 0.070209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.36
| 0
| 0.56
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
db991c0b9d90667e802fd9ff394fd81d65368331
| 624
|
py
|
Python
|
ex38.py
|
YunMeMeThaw/python_exercises
|
151d5d3695d578059611ac09c94b3677442197d7
|
[
"MIT"
] | null | null | null |
ex38.py
|
YunMeMeThaw/python_exercises
|
151d5d3695d578059611ac09c94b3677442197d7
|
[
"MIT"
] | null | null | null |
ex38.py
|
YunMeMeThaw/python_exercises
|
151d5d3695d578059611ac09c94b3677442197d7
|
[
"MIT"
] | null | null | null |
ten_things = "Apples Oranges cows Telephone Light Sugar"
print ("Wait there are not 10 things in that list. Let's fix")
stuff = ten_things.split(' ')
more_stuff = {"Day", "Night", "Song", "Firebee",
"Corn", "Banana", "Girl", "Boy"}
while len(stuff) !=10:
next_one = more_stuff.pop()
print("Adding: ", next_one)
stuff.append(next_one)
print (f"There are {len(stuff)} items n ow.")
print ("There we go : ", stuff)
print ("Let's do some things with stuff.")
print (stuff[1])
print (stuff[-1]) # whoa! cool!
print (stuff.pop())
print (' '.join(stuff)) # what? cool !
print ('#'.join(stuff[3:5])) #super stealler!
| 27.130435
| 62
| 0.647436
| 97
| 624
| 4.092784
| 0.57732
| 0.052897
| 0.065491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015267
| 0.160256
| 624
| 22
| 63
| 28.363636
| 0.742366
| 0.0625
| 0
| 0
| 0
| 0
| 0.378657
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.588235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 1
|
db99d0c184b26e85aa45a341b38434f288a19023
| 700
|
py
|
Python
|
var/spack/repos/builtin/packages/diffmark/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/diffmark/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/diffmark/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Diffmark(AutotoolsPackage):
"""Diffmark is a DSL for transforming one string to another."""
homepage = "https://github.com/vbar/diffmark"
git = "https://github.com/vbar/diffmark.git"
version('master', branch='master')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('pkgconfig', type='build')
depends_on('libxml2')
| 30.434783
| 73
| 0.688571
| 90
| 700
| 5.288889
| 0.666667
| 0.113445
| 0.168067
| 0.189076
| 0.121849
| 0.121849
| 0
| 0
| 0
| 0
| 0
| 0.02069
| 0.171429
| 700
| 22
| 74
| 31.818182
| 0.8
| 0.352857
| 0
| 0
| 0
| 0
| 0.328829
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
db9d8c67bcfd3a7c9d253f50f4a6bf8badfcdb9c
| 592
|
py
|
Python
|
betterbib/__init__.py
|
tbabej/betterbib
|
80a3c9040232d9988f9a1e4c40724b40b9b9ed85
|
[
"MIT"
] | null | null | null |
betterbib/__init__.py
|
tbabej/betterbib
|
80a3c9040232d9988f9a1e4c40724b40b9b9ed85
|
[
"MIT"
] | null | null | null |
betterbib/__init__.py
|
tbabej/betterbib
|
80a3c9040232d9988f9a1e4c40724b40b9b9ed85
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
from __future__ import print_function
from betterbib.__about__ import (
__version__,
__author__,
__author_email__,
__website__,
)
from betterbib.tools import (
create_dict,
decode,
pybtex_to_dict,
pybtex_to_bibtex_string,
write,
update,
JournalNameUpdater,
translate_month
)
from betterbib.crossref import Crossref
from betterbib.dblp import Dblp
try:
import pipdate
except ImportError:
pass
else:
if pipdate.needs_checking(__name__):
print(pipdate.check(__name__, __version__), end='')
| 18.5
| 59
| 0.701014
| 64
| 592
| 5.765625
| 0.640625
| 0.140921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002169
| 0.221284
| 592
| 31
| 60
| 19.096774
| 0.798265
| 0.035473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.038462
| 0.269231
| 0
| 0.269231
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
dbad2da50018b20b9e8cf4be1668cfeef2d4c6cb
| 729
|
py
|
Python
|
tests/test_dump.py
|
flaeppe/astunparse
|
754ec7d113fa273625ccc7b6c5d65aa7700ab8a9
|
[
"PSF-2.0"
] | 189
|
2016-03-15T06:48:48.000Z
|
2022-03-12T09:34:10.000Z
|
tests/test_dump.py
|
flaeppe/astunparse
|
754ec7d113fa273625ccc7b6c5d65aa7700ab8a9
|
[
"PSF-2.0"
] | 50
|
2015-09-14T16:22:00.000Z
|
2022-02-24T05:36:57.000Z
|
tests/test_dump.py
|
flaeppe/astunparse
|
754ec7d113fa273625ccc7b6c5d65aa7700ab8a9
|
[
"PSF-2.0"
] | 52
|
2015-04-29T10:52:33.000Z
|
2022-03-03T19:59:54.000Z
|
import ast
import re
import sys
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
import astunparse
from tests.common import AstunparseCommonTestCase
class DumpTestCase(AstunparseCommonTestCase, unittest.TestCase):
def assertASTEqual(self, dump1, dump2):
# undo the pretty-printing
dump1 = re.sub(r"(?<=[\(\[])\n\s+", "", dump1)
dump1 = re.sub(r"\n\s+", " ", dump1)
self.assertEqual(dump1, dump2)
def check_roundtrip(self, code1, filename="internal", mode="exec"):
ast_ = compile(str(code1), filename, mode, ast.PyCF_ONLY_AST)
dump1 = astunparse.dump(ast_)
dump2 = ast.dump(ast_)
self.assertASTEqual(dump1, dump2)
| 29.16
| 71
| 0.663923
| 89
| 729
| 5.359551
| 0.52809
| 0.062893
| 0.041929
| 0.046122
| 0.075472
| 0.075472
| 0.075472
| 0
| 0
| 0
| 0
| 0.029412
| 0.207133
| 729
| 24
| 72
| 30.375
| 0.795848
| 0.032922
| 0
| 0
| 0
| 0
| 0.048364
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.105263
| false
| 0
| 0.368421
| 0
| 0.526316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
dbb81ecf1571a74c986e0ef5e76802273692f79e
| 1,106
|
py
|
Python
|
data_interrogator/admin/views.py
|
s-i-l-k-e/django-data-interrogator
|
0284168b81aaa31a8df84f3ea52166eded8a4362
|
[
"MIT"
] | null | null | null |
data_interrogator/admin/views.py
|
s-i-l-k-e/django-data-interrogator
|
0284168b81aaa31a8df84f3ea52166eded8a4362
|
[
"MIT"
] | null | null | null |
data_interrogator/admin/views.py
|
s-i-l-k-e/django-data-interrogator
|
0284168b81aaa31a8df84f3ea52166eded8a4362
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import user_passes_test
from django.utils.decorators import method_decorator
from data_interrogator.admin.forms import AdminInvestigationForm, AdminPivotTableForm
from data_interrogator.interrogators import Allowable
from data_interrogator.views import InterrogationView, InterrogationAutocompleteUrls, PivotTableView, \
InterrogationAutoComplete
class AdminInterrogationRoom(InterrogationView):
template_name = 'admin/analytics/analytics.html'
form_class = AdminInvestigationForm
report_models = Allowable.ALL_MODELS
allowed = Allowable.ALL_APPS
excluded = []
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def get(self, request):
return super(AdminInterrogationRoom,self).get(request)
class AdminInterrogationAutocompleteUrls(InterrogationAutocompleteUrls):
interrogator_view_class = AdminInterrogationRoom
interrogator_autocomplete_class = InterrogationAutoComplete
class AdminPivotTableView(PivotTableView):
form_class = AdminPivotTableForm
template_name = 'admin/analytics/pivot.html'
| 35.677419
| 103
| 0.824593
| 104
| 1,106
| 8.567308
| 0.509615
| 0.026936
| 0.06734
| 0.058361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118445
| 1,106
| 30
| 104
| 36.866667
| 0.913846
| 0
| 0
| 0
| 0
| 0
| 0.050633
| 0.050633
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0.095238
| 0.238095
| 0.047619
| 0.904762
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
dbb832b244c092d5e626be322221a0dd99c61a02
| 327
|
py
|
Python
|
configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py
|
heytanay/mmsegmentation
|
7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8
|
[
"Apache-2.0"
] | 11
|
2022-02-04T01:09:45.000Z
|
2022-03-08T05:49:16.000Z
|
configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py
|
heytanay/mmsegmentation
|
7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8
|
[
"Apache-2.0"
] | 2
|
2022-02-25T03:07:23.000Z
|
2022-03-08T12:54:05.000Z
|
configs/pspnet/pspnet_r18-d8_512x512_80k_loveda.py
|
heytanay/mmsegmentation
|
7ddd2fe2ecff9c95999bd00ec05cc37eafb558f8
|
[
"Apache-2.0"
] | 2
|
2021-04-23T05:32:00.000Z
|
2021-11-11T02:45:08.000Z
|
_base_ = './pspnet_r50-d8_512x512_80k_loveda.py'
model = dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')),
decode_head=dict(
in_channels=512,
channels=128,
),
auxiliary_head=dict(in_channels=256, channels=64))
| 27.25
| 72
| 0.629969
| 40
| 327
| 4.85
| 0.775
| 0.082474
| 0.103093
| 0.185567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108
| 0.235474
| 327
| 11
| 73
| 29.727273
| 0.668
| 0
| 0
| 0
| 0
| 0
| 0.220183
| 0.189602
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
dbbca7079e41d333542d3d27bb46afa6aecbe834
| 1,580
|
py
|
Python
|
test/test_catalog_manager.py
|
weknowtraining/athena-glue-service-logs
|
b7cf77408486f2bfa941b8609617ed47aa3e2d02
|
[
"Apache-2.0"
] | 133
|
2018-09-17T12:43:14.000Z
|
2022-03-15T20:03:12.000Z
|
test/test_catalog_manager.py
|
weknowtraining/athena-glue-service-logs
|
b7cf77408486f2bfa941b8609617ed47aa3e2d02
|
[
"Apache-2.0"
] | 22
|
2018-11-19T21:51:04.000Z
|
2022-03-08T12:13:19.000Z
|
test/test_catalog_manager.py
|
weknowtraining/athena-glue-service-logs
|
b7cf77408486f2bfa941b8609617ed47aa3e2d02
|
[
"Apache-2.0"
] | 46
|
2018-10-04T04:27:26.000Z
|
2022-03-01T03:28:38.000Z
|
# pylint: skip-file
from athena_glue_service_logs.catalog_manager import BaseCatalogManager
def test_class_init(mocker):
mocker.patch.multiple(BaseCatalogManager, __abstractmethods__=set())
base_catalog = BaseCatalogManager('us-west-2', 'dbname', 'tablename', 's3://somewhere')
assert base_catalog.database_name == 'dbname'
assert base_catalog.s3_location == 's3://somewhere'
assert base_catalog.table_name == 'tablename'
def test_init_with_partitions(mocker):
mocker.patch.multiple(BaseCatalogManager, __abstractmethods__=set())
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.does_database_exist', return_value=True)
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_database')
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_table')
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.create_partitions')
base_catalog = BaseCatalogManager('us-west-2', 'dbname', 'tablename', 's3://somewhere')
base_catalog.initialize_with_partitions(['a', 'b', 'c'])
assert BaseCatalogManager.create_database.call_count == 0
BaseCatalogManager.create_table.assert_called_once()
BaseCatalogManager.create_partitions.assert_called_once_with(partition_list=['a', 'b', 'c'])
mocker.patch('athena_glue_service_logs.catalog_manager.BaseCatalogManager.does_database_exist', return_value=False)
base_catalog.initialize_with_partitions(['a', 'b', 'c'])
assert BaseCatalogManager.create_database.call_count == 1
| 50.967742
| 119
| 0.79557
| 185
| 1,580
| 6.405405
| 0.297297
| 0.064979
| 0.086076
| 0.106329
| 0.751055
| 0.71308
| 0.683544
| 0.580591
| 0.580591
| 0.580591
| 0
| 0.005571
| 0.091139
| 1,580
| 30
| 120
| 52.666667
| 0.819638
| 0.010759
| 0
| 0.285714
| 0
| 0
| 0.317745
| 0.244715
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
dbc290ad28df369cc2a5189c66e670824982c619
| 28,719
|
py
|
Python
|
compass/core/_scrapers/member.py
|
MrNoScript/compass-interface-core
|
8c945ef36f7bee396bd5a744404eaa88d280a845
|
[
"MIT"
] | null | null | null |
compass/core/_scrapers/member.py
|
MrNoScript/compass-interface-core
|
8c945ef36f7bee396bd5a744404eaa88d280a845
|
[
"MIT"
] | null | null | null |
compass/core/_scrapers/member.py
|
MrNoScript/compass-interface-core
|
8c945ef36f7bee396bd5a744404eaa88d280a845
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import re
import time
from typing import get_args, Literal, TYPE_CHECKING, Union
from lxml import html
from compass.core.interface_base import InterfaceBase
from compass.core.logger import logger
from compass.core.schemas import member as schema
from compass.core.settings import Settings
from compass.core.utility import cast
from compass.core.utility import maybe_int
from compass.core.utility import parse
if TYPE_CHECKING:
import requests
MEMBER_PROFILE_TAB_TYPES = Literal[
"Personal", "Roles", "Permits", "Training", "Awards", "Emergency", "Comms", "Visibility", "Disclosures"
]
class PeopleScraper(InterfaceBase):
"""Class directly interfaces with Compass operations to extract member data.
Compass's MemberProfile.aspx has 13 tabs:
1. Personal Details (No Key)
2. Your Children (Page=CHILD)
3. Roles (Page=ROLES)
4. Permits (Page=PERMITS)
5. Training (Page=TRAINING)
6. Awards (Page=AWARDS)
7. Youth Badges/Awards (Page=BADGES)
8. Event Invitations (Page=EVENTS)
9. Emergency Details (Page=EMERGENCY)
10. Communications (Page=COMMS)
11. Visibility (Page=VISIBILITY)
12. Disclosures (Page=DISCLOSURES)
13. Parents/Guardians (Page=PARENT)
Of these, tabs 2, 7, 8, 13 are disabled functionality.
Tab 11 (Visibility) is only shown on the members' own profile.
For member-adjdacent operations there are additional endpoints:
- /Popups/Profile/AssignNewRole.aspx
- /Popups/Maint/NewPermit.aspx
- /Popups/Profile/EditProfile.aspx
Currently we only use one of these endpoints (AssignNewRole), as all
other data we need can be found from the MemberProfile tabs.
All functions in the class output native types.
"""
def __init__(self, session: requests.Session, validate: bool = False):
"""Constructor for PeopleScraper.
takes an initialised Session object from Logon
"""
super().__init__(session)
self.validate = validate
def _get_member_profile_tab(self, membership_num: int, profile_tab: MEMBER_PROFILE_TAB_TYPES) -> bytes:
"""Returns data from a given tab in MemberProfile for a given member.
Args:
membership_num: Membership Number to use
profile_tab: Tab requested from Compass
Returns:
A dict with content and encoding, e.g.:
{"content": b"...", "encoding": "utf-8"}
Both keys will always be present.
Raises:
ValueError: The given profile_tab value is illegal
Todo:
Other possible exceptions? i.e. from Requests
"""
profile_tab = profile_tab.upper()
tabs = tuple(tab.upper() for tab in get_args(MEMBER_PROFILE_TAB_TYPES))
url = f"{Settings.base_url}/MemberProfile.aspx?CN={membership_num}"
if profile_tab == "PERSONAL": # Personal tab has no key so is a special case
response = self._get(url)
elif profile_tab in tabs:
url += f"&Page={profile_tab}&TAB"
response = self._get(url)
else:
raise ValueError(f"Specified member profile tab {profile_tab} is invalid. Allowed values are {tabs}")
return response.content
def get_personal_tab(self, membership_num: int) -> Union[schema.MemberDetails, dict]:
"""Returns data from Personal Details tab for a given member.
Args:
membership_num: Membership Number to use
Returns:
A dict mapping keys to the corresponding data from the personal
data tab.
For example:
{'membership_number': ...,
'forenames': '...',
'surname': '...',
'main_phone': '...',
'main_email': '...',
'name': '...',
'known_as': '...',
'join_date': datetime.datetime(...),
'sex': '...',
'birth_date': datetime.datetime(...),
'nationality': '...',
'ethnicity': '...',
'religion': '...',
'occupation': '...',
'address': '...'}
Keys will be present only if valid data could be extracted and
parsed from Compass.
Raises:
PermissionError:
Access to the member is not given by the current authentication
Todo:
Other possible exceptions? i.e. from Requests
"""
response = self._get_member_profile_tab(membership_num, "Personal")
tree = html.fromstring(response)
if tree.forms[0].action == "./ScoutsPortal.aspx?Invalid=AccessCN":
raise PermissionError(f"You do not have permission to the details of {membership_num}")
details = dict()
# ### Extractors
# ## Core:
details["membership_number"] = membership_num
# Name(s)
names = tree.xpath("//title//text()")[0].strip().split(" ")[3:]
details["forenames"] = names[0]
details["surname"] = " ".join(names[1:])
# Main Phone
details["main_phone"] = tree.xpath('string(//*[text()="Phone"]/../../../td[3])')
# Main Email
details["main_email"] = tree.xpath('string(//*[text()="Email"]/../../../td[3])')
# ## Core - Positional:
# Full Name
details["name"] = tree.xpath("string(//*[@id='divProfile0']//tr[1]/td[2]/label)")
# Known As
details["known_as"] = tree.xpath("string(//*[@id='divProfile0']//tr[2]/td[2]/label)")
# Join Date # TODO Unknown - take date from earliest role?
join_date_str = tree.xpath("string(//*[@id='divProfile0']//tr[4]/td[2]/label)")
details["join_date"] = parse(join_date_str) if join_date_str != "Unknown" else None
# ## Position Varies, only if authorised:
# Gender
details["sex"] = tree.xpath("string(//*[@id='divProfile0']//*[text()='Gender:']/../../td[2])")
# DOB
details["birth_date"] = parse(tree.xpath("string(//*[@id='divProfile0']//*[text()='Date of Birth:']/../../td[2])"))
# Nationality
details["nationality"] = tree.xpath("string(//*[@id='divProfile0']//*[text()='Nationality:']/../../td[2])")
# Ethnicity
details["ethnicity"] = tree.xpath("normalize-space(//*[@id='divProfile0']//*[text()='Ethnicity:']/../../td[2])")
# Religion
details["religion"] = tree.xpath("normalize-space(//*[@id='divProfile0']//*[text()='Religion/Faith:']/../../td[2])")
# Occupation
details["occupation"] = tree.xpath("normalize-space(//*[@id='divProfile0']//*[text()='Occupation:']/../../td[2])")
# Address
details["address"] = tree.xpath('string(//*[text()="Address"]/../../../td[3])')
# Filter out keys with no value.
details = {k: v for k, v in details.items() if v}
if self.validate:
return schema.MemberDetails.parse_obj(details)
else:
return details
def get_roles_tab(self, membership_num: int, keep_non_volunteer_roles: bool = False) -> Union[schema.MemberRolesDict, dict]:
"""Returns data from Roles tab for a given member.
Sanitises the data to a common format, and removes Occasional Helper, Network, and PVG roles by default.
Args:
membership_num: Membership Number to use
keep_non_volunteer_roles: Keep Helper (OH/PVG) & Network roles?
Returns:
A dict of dicts mapping keys to the corresponding data from the roles tab.
E.g.:
{1234578:
{'role_number': 1234578,
'membership_number': ...,
'role_title': '...',
'role_class': '...',
'role_type': '...',
'location_id': ...,
'location_name': '...',
'role_start_date': datetime.datetime(...),
'role_end': datetime.datetime(...),
'role_status': '...'},
{...}
}
Keys will always be present.
Raises:
PermissionError:
Access to the member is not given by the current authentication
Todo:
Other possible exceptions? i.e. from Requests
primary_role
"""
logger.debug(f"getting roles tab for member number: {membership_num}")
response = self._get_member_profile_tab(membership_num, "Roles")
tree = html.fromstring(response)
if tree.forms[0].action == "./ScoutsPortal.aspx?Invalid=AccessCN":
raise PermissionError(f"You do not have permission to the details of {membership_num}")
roles_data = {}
rows = tree.xpath("//tbody/tr")
for row in rows:
# Get children (cells in row)
cells = list(row) # filter out empty elements
# If current role allows selection of role for editing, remove tickbox
if any(el.tag == "input" for el in cells[0]):
cells.pop(0)
role_number = int(row.get("data-pk"))
status_with_review = cells[5].text_content().strip()
if status_with_review.startswith("Full Review Due "):
role_status = "Full"
review_date = parse(status_with_review.removeprefix("Full Review Due "))
else:
role_status = status_with_review
review_date = None
role_details = dict(
role_number=role_number,
membership_number=membership_num,
role_title=cells[0].text_content().strip(),
role_class=cells[1].text_content().strip(),
# role_type only visible if access to System Admin tab
role_type=[*row.xpath("./td[1]/*/@title"), None][0],
# location_id only visible if role is in hierarchy AND location still exists
location_id=cells[2][0].get("data-ng_id"),
location_name=cells[2].text_content().strip(),
role_start=parse(cells[3].text_content().strip()),
role_end=parse(cells[4].text_content().strip()),
role_status=role_status,
review_date=review_date,
can_view_details=any("VIEWROLE" in el.get("class") for el in cells[6]),
)
# Remove OHs etc from list
if not keep_non_volunteer_roles and (
"helper" in role_details["role_class"].lower()
or {role_details["role_title"].lower()} <= {"occasional helper", "pvg", "network member"}
):
continue
roles_data[role_number] = role_details
if self.validate:
return schema.MemberRolesDict.parse_obj(roles_data)
else:
return roles_data
def get_training_tab(
self, membership_num: int, ongoing_only: bool = False
) -> Union[schema.MemberTrainingTab, schema.MemberMOGLList, dict]:
"""Returns data from Training tab for a given member.
Args:
membership_num: Membership Number to use
ongoing_only: Return a dataframe of role training & OGL info? Otherwise returns all data
Returns:
A dict mapping keys to the corresponding data from the training
tab.
E.g.:
{'roles': {1234567: {'role_number': 1234567,
'role_title': '...',
'role_start': datetime.datetime(...),
'role_status': '...',
'location': '...',
'ta_data': '...',
'ta_number': '...',
'ta_name': '...',
'completion': '...',
'wood_badge_number': '...'},
...},
'plps': {1234567: [{'pk': 6142511,
'module_id': ...,
'code': '...',
'name': '...',
'learning_required': False,
'learning_method': '...',
'learning_completed': '...',
'validated_membership_number': '...',
'validated_name': '...'},
...],
...},
'mandatory': {'GDPR':
{'name': 'GDPR',
'completed_date': datetime.datetime(...)},
...}}
Keys will always be present.
Todo:
Other possible exceptions? i.e. from Requests
"""
# pylint: disable=too-many-locals,too-many-statements
response = self._get_member_profile_tab(membership_num, "Training")
tree = html.fromstring(response)
rows = tree.xpath("//table[@id='tbl_p5_TrainModules']/tr")
training_plps = {}
training_roles = {}
for row in rows:
# Personal Learning Plan (PLP) data
if "trPLP" in row.classes:
plp = row
plp_table = plp.getchildren()[0].getchildren()[0]
plp_data = []
for module_row in plp_table:
if module_row.get("class") != "msTR trMTMN":
continue
module_data = {}
child_nodes = list(module_row)
module_data["pk"] = int(module_row.get("data-pk"))
module_data["module_id"] = int(child_nodes[0].get("id")[4:])
matches = re.match(r"^([A-Z0-9]+) - (.+)$", child_nodes[0].text_content()).groups()
if matches:
module_data["code"] = str(matches[0])
module_data["name"] = matches[1]
# Skip processing if we only want ongoing learning data and the module is not GDPR.
if ongoing_only and "gdpr" not in module_data["code"].lower():
continue
learning_required = child_nodes[1].text_content().lower()
module_data["learning_required"] = "yes" in learning_required if learning_required else None
module_data["learning_method"] = child_nodes[2].text_content() or None
module_data["learning_completed"] = parse(child_nodes[3].text_content())
module_data["learning_date"] = parse(child_nodes[3].text_content())
validated_by_string = child_nodes[4].text_content()
if validated_by_string:
# Add empty item to prevent IndexError
validated_by_data = validated_by_string.split(" ", maxsplit=1) + [""]
module_data["validated_membership_number"] = maybe_int(validated_by_data[0])
module_data["validated_name"] = validated_by_data[1]
module_data["validated_date"] = parse(child_nodes[5].text_content())
plp_data.append(module_data)
training_plps[int(plp_table.get("data-pk"))] = plp_data
# Role data
if "msTR" in row.classes:
role = row
child_nodes = list(role)
info = {} # NoQA
info["role_number"] = int(role.xpath("./@data-ng_mrn")[0])
info["role_title"] = child_nodes[0].text_content()
info["role_start"] = parse(child_nodes[1].text_content())
status_with_review = child_nodes[2].text_content()
if status_with_review.startswith("Full (Review Due: "):
info["role_status"] = "Full"
info["review_date"] = parse(status_with_review.removeprefix("Full (Review Due: ").removesuffix(")"))
else:
info["role_status"] = status_with_review
info["review_date"] = None
info["location"] = child_nodes[3].text_content()
training_advisor_string = child_nodes[4].text_content()
if training_advisor_string:
info["ta_data"] = training_advisor_string
# Add empty item to prevent IndexError
training_advisor_data = training_advisor_string.split(" ", maxsplit=1) + [""]
info["ta_number"] = maybe_int(training_advisor_data[0])
info["ta_name"] = training_advisor_data[1]
completion_string = child_nodes[5].text_content()
if completion_string:
info["completion"] = completion_string
parts = completion_string.split(":")
info["completion_type"] = parts[0].strip()
info["completion_date"] = parse(parts[1].strip())
assert len(parts) <= 2, parts[2:]
# info["ct"] = parts[3:] # TODO what is this? From CompassRead.php
info["wood_badge_number"] = child_nodes[5].get("id", "").removeprefix("WB_") or None
training_roles[info["role_number"]] = info
# Handle GDPR:
# Get latest GDPR date
training_ogl = {
"GDPR": dict(
name="GDPR",
completed_date=next(
reversed(
sorted(mod["validated_date"] for plp in training_plps.values() for mod in plp if mod["code"] == "GDPR")
),
None,
),
),
}
for ongoing_learning in tree.xpath("//tr[@data-ng_code]"):
cell_text = {c.get("id", "<None>").split("_")[0]: c.text_content() for c in ongoing_learning}
training_ogl[ongoing_learning.get("data-ng_code")] = dict(
name=cell_text.get("<None>"),
completed_date=parse(cell_text.get("tdLastComplete")),
renewal_date=parse(cell_text.get("tdRenewal")),
)
# TODO missing data-pk from list(cell)[0].tag == "input", and module names/codes. Are these important?
if ongoing_only:
return schema.MemberMOGLList.parse_obj(training_ogl) if self.validate else training_ogl
training_data = {
"roles": training_roles,
"plps": training_plps,
"mandatory": training_ogl,
}
return schema.MemberTrainingTab.parse_obj(training_data) if self.validate else training_data
def get_permits_tab(self, membership_num: int) -> Union[schema.MemberPermitsList, list]:
"""Returns data from Permits tab for a given member.
If a permit has been revoked, the expires value is None and the status is PERM_REV
Args:
membership_num: Membership Number to use
Returns:
A list of dicts mapping keys to the corresponding data from the
permits tab.
Keys will always be present.
Todo:
Other possible exceptions? i.e. from Requests
"""
response = self._get_member_profile_tab(membership_num, "Permits")
tree = html.fromstring(response)
# Get rows with permit content
rows = tree.xpath('//table[@id="tbl_p4_permits"]//tr[@class="msTR msTRPERM"]')
permits = []
for row in rows:
permit = dict(membership_number=membership_num)
child_nodes = list(row)
permit["permit_type"] = child_nodes[1].text_content()
permit["category"] = child_nodes[2].text_content()
permit["type"] = child_nodes[3].text_content()
permit["restrictions"] = child_nodes[4].text_content()
expires = child_nodes[5].text_content()
permit["expires"] = parse(expires) if expires != "Revoked" else None
permit["status"] = child_nodes[5].get("class")
permits.append(permit)
if self.validate:
return schema.MemberPermitsList.parse_obj(permits)
else:
return permits
# See getAppointment in PGS\Needle
def get_roles_detail(
self, role_number: int, response: Union[str, requests.Response] = None
) -> Union[schema.MemberRolePopup, dict]:
"""Returns detailed data from a given role number.
Args:
role_number: Role Number to use
response: Pre-generated response to use
Returns:
A dicts mapping keys to the corresponding data from the
role detail data.
E.g.:
{'hierarchy': {'organisation': 'The Scout Association',
'country': '...',
'region': '...',
'county': '...',
'district': '...',
'group': '...',
'section': '...'},
'details': {'role_number': ...,
'organisation_level': '...',
'birth_date': datetime.datetime(...),
'membership_number': ...,
'name': '...',
'role_title': '...',
'role_start': datetime.datetime(...),
'role_status': '...',
'line_manager_number': ...,
'line_manager': '...',
'ce_check': datetime.datetime(...),
'disclosure_check': '...',
'references': '...',
'appointment_panel_approval': '...',
'commissioner_approval': '...',
'committee_approval': '...'},
'getting_started': {...: {'name': '...',
'validated': datetime.datetime(...),
'validated_by': '...'},
...
}}
Keys will always be present.
Todo:
Other possible exceptions? i.e. from Requests
"""
# pylint: disable=too-many-locals,too-many-statements
renamed_levels = {
"County / Area / Scottish Region / Overseas Branch": "County",
}
renamed_modules = {
1: "module_01",
"TRST": "trustee_intro",
2: "module_02",
3: "module_03",
4: "module_04",
"GDPR": "GDPR",
}
unset_vals = {"--- Not Selected ---", "--- No Items Available ---", "--- No Line Manager ---"}
module_names = {
"Essential Information": "M01",
"Trustee Introduction": "TRST",
"PersonalLearningPlan": "M02",
"Tools for the Role (Section Leaders)": "M03",
"Tools for the Role (Managers and Supporters)": "M04",
"General Data Protection Regulations": "GDPR",
}
references_codes = {
"NC": "Not Complete",
"NR": "Not Required",
"RR": "References Requested",
"S": "References Satisfactory",
"U": "References Unsatisfactory",
}
start_time = time.time()
if response is None:
response = self._get(f"{Settings.base_url}/Popups/Profile/AssignNewRole.aspx?VIEW={role_number}")
logger.debug(f"Getting details for role number: {role_number}. Request in {(time.time() - start_time):.2f}s")
post_response_time = time.time()
if isinstance(response, (str, bytes)):
tree = html.fromstring(response)
else:
tree = html.fromstring(response.content)
form = tree.forms[0]
if form.action == "./ScoutsPortal.aspx?Invalid=Access":
raise PermissionError(f"You do not have permission to the details of role {role_number}")
member_string = form.fields.get("ctl00$workarea$txt_p1_membername")
ref_code = form.fields.get("ctl00$workarea$cbo_p2_referee_status")
role_details = dict()
# Approval and Role details
role_details["role_number"] = role_number
role_details["organisation_level"] = form.fields.get("ctl00$workarea$cbo_p1_level")
role_details["birth_date"] = parse(form.inputs["ctl00$workarea$txt_p1_membername"].get("data-dob"))
role_details["membership_number"] = int(form.fields.get("ctl00$workarea$txt_p1_memberno"))
role_details["name"] = member_string.split(" ", maxsplit=1)[1] # TODO does this make sense - should name be in every role??
role_details["role_title"] = form.fields.get("ctl00$workarea$txt_p1_alt_title")
role_details["role_start"] = parse(form.fields.get("ctl00$workarea$txt_p1_startdate"))
# Role Status
role_details["role_status"] = form.fields.get("ctl00$workarea$txt_p2_status")
# Line Manager
line_manager_el = next((op for op in form.inputs["ctl00$workarea$cbo_p2_linemaneger"] if op.get("selected")), None)
role_details["line_manager_number"] = maybe_int(line_manager_el.get("value")) if line_manager_el is not None else None
role_details["line_manager"] = line_manager_el.text.strip() if line_manager_el is not None else None
# Review Date
role_details["review_date"] = parse(form.fields.get("ctl00$workarea$txt_p2_review"))
# CE (Confidential Enquiry) Check # TODO if CE check date != current date then is valid
role_details["ce_check"] = parse(form.fields.get("ctl00$workarea$txt_p2_cecheck"))
# Disclosure Check
disclosure_with_date = form.fields.get("ctl00$workarea$txt_p2_disclosure")
if disclosure_with_date.startswith("Disclosure Issued : "):
disclosure_date = parse(disclosure_with_date.removeprefix("Disclosure Issued : "))
disclosure_check = "Disclosure Issued"
else:
disclosure_date = None
disclosure_check = disclosure_with_date
role_details["disclosure_check"] = disclosure_check # TODO extract date
role_details["disclosure_date"] = disclosure_date # TODO extract date
# References
role_details["references"] = references_codes.get(ref_code, ref_code)
approval_values = {}
for row in tree.xpath("//tr[@class='trProp']"):
select = row[1][0]
code = select.get("data-app_code")
approval_values[code] = select.get("data-db")
# select.get("title") gives title text, but this is not useful as it does not reflect latest changes,
# but only who added the role to Compass.
# Appointment Panel Approval
role_details["appointment_panel_approval"] = approval_values.get("ROLPRP|AACA")
# Commissioner Approval
role_details["commissioner_approval"] = approval_values.get("ROLPRP|CAPR")
# Committee Approval
role_details["committee_approval"] = approval_values.get("ROLPRP|CCA")
if role_details["line_manager_number"] in unset_vals:
role_details["line_manager_number"] = None
# Filter null values
role_details = {k: v for k, v in role_details.items() if v is not None}
# Getting Started
modules_output = {}
getting_started_modules = tree.xpath("//tr[@class='trTrain trTrainData']")
# Get all training modules and then extract the required modules to a dictionary
for module in getting_started_modules:
module_name = module[0][0].text.strip()
if module_name in module_names:
info = {
# "name": module_names[module_name], # short_name
"validated": parse(module[2][0].value), # Save module validation date
"validated_by": module[1][1].value or None, # Save who validated the module
}
mod_code = cast(module[2][0].get("data-ng_value")) # int or str
modules_output[renamed_modules[mod_code]] = info
# Get all levels of the org hierarchy and select those that will have information:
# Get all inputs with location data
org_levels = [v for k, v in sorted(dict(form.inputs).items()) if "ctl00$workarea$cbo_p1_location" in k]
# TODO
all_locations = {row.get("title"): row.findtext("./option") for row in org_levels}
clipped_locations = {
renamed_levels.get(key, key).lower(): value for key, value in all_locations.items() if value not in unset_vals
}
logger.debug(
f"Processed details for role number: {role_number}. "
f"Compass: {(post_response_time - start_time):.3f}s; Processing: {(time.time() - post_response_time):.4f}s"
)
# TODO data-ng_id?, data-rtrn_id?
full_details = {
"hierarchy": clipped_locations,
"details": role_details,
"getting_started": modules_output,
}
if self.validate:
return schema.MemberRolePopup.parse_obj(full_details)
else:
return full_details
| 41.262931
| 132
| 0.568126
| 3,153
| 28,719
| 4.994925
| 0.174437
| 0.020255
| 0.008254
| 0.011429
| 0.260461
| 0.189409
| 0.160455
| 0.123436
| 0.099879
| 0.090037
| 0
| 0.011589
| 0.305965
| 28,719
| 695
| 133
| 41.322302
| 0.778547
| 0.281765
| 0
| 0.10061
| 0
| 0.006098
| 0.213864
| 0.081701
| 0
| 0
| 0
| 0.01295
| 0.003049
| 1
| 0.021341
| false
| 0.02439
| 0.039634
| 0
| 0.097561
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
dbc52992fc79a5adada939783cc09ffe329b0264
| 1,623
|
py
|
Python
|
konnection/settings/local.py
|
IanSeng/CMPUT404_PROJECT
|
80acd2c57de4b091e0e66ad9f5f2df17801bf09e
|
[
"W3C-20150513"
] | null | null | null |
konnection/settings/local.py
|
IanSeng/CMPUT404_PROJECT
|
80acd2c57de4b091e0e66ad9f5f2df17801bf09e
|
[
"W3C-20150513"
] | null | null | null |
konnection/settings/local.py
|
IanSeng/CMPUT404_PROJECT
|
80acd2c57de4b091e0e66ad9f5f2df17801bf09e
|
[
"W3C-20150513"
] | null | null | null |
from konnection.settings.base import *
from pathlib import Path
import os
import dotenv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
SECRET_KEY = 'temporaryKey'
# For tests
# https://stackoverflow.com/a/35224204
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--with-spec', '--spec-color']
# Adding secrets to env file
# From StackOverflow https://stackoverflow.com/a/61437799
# From Zack Plauché https://stackoverflow.com/users/10415970/zack-plauch%c3%a9
dotenv_file = os.path.join(BASE_DIR, ".env")
if os.path.isfile(dotenv_file):
dotenv.load_dotenv(dotenv_file)
# Connecting PostgreSQL to Django
# From https://www.digitalocean.com/community/tutorials/how-to-use-postgresql-with-your-django-application-on-ubuntu-14-04
# From Digital Ocean
# From Justin Ellingwood https://www.digitalocean.com/community/users/jellingwood
if os.getenv('GITHUB_WORKFLOW'):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'github-actions',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'localhost',
'PORT': '5432'
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'myproject',
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': 'localhost',
'PORT': '',
}
}
| 31.211538
| 122
| 0.653112
| 191
| 1,623
| 5.450262
| 0.554974
| 0.020173
| 0.060519
| 0.042267
| 0.153698
| 0.092219
| 0.092219
| 0
| 0
| 0
| 0
| 0.027216
| 0.20764
| 1,623
| 52
| 123
| 31.211538
| 0.782271
| 0.360444
| 0
| 0.176471
| 0
| 0
| 0.296982
| 0.095424
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.058824
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
dbc6414ac2f786d426d11b5f7b21e310e975369d
| 23,614
|
py
|
Python
|
pyx12/test/test_x12context.py
|
arenius/pyx12
|
537493deaa0b8e18a3fa72eb1b3eeae9ef043b11
|
[
"BSD-3-Clause"
] | 1
|
2019-11-06T21:22:28.000Z
|
2019-11-06T21:22:28.000Z
|
pyx12/test/test_x12context.py
|
arenius/pyx12
|
537493deaa0b8e18a3fa72eb1b3eeae9ef043b11
|
[
"BSD-3-Clause"
] | null | null | null |
pyx12/test/test_x12context.py
|
arenius/pyx12
|
537493deaa0b8e18a3fa72eb1b3eeae9ef043b11
|
[
"BSD-3-Clause"
] | 1
|
2021-04-12T14:32:41.000Z
|
2021-04-12T14:32:41.000Z
|
import unittest
#import tempfile
try:
from StringIO import StringIO
except:
from io import StringIO
import pyx12.error_handler
from pyx12.errors import EngineError # , X12PathError
import pyx12.x12context
import pyx12.params
from pyx12.test.x12testdata import datafiles
class X12fileTestCase(unittest.TestCase):
def setUp(self):
self.param = pyx12.params.params()
def _makeFd(self, x12str=None):
try:
if x12str:
fd = StringIO(x12str)
else:
fd = StringIO()
except:
if x12str:
fd = StringIO(x12str, encoding='ascii')
else:
fd = StringIO(encoding='ascii')
fd.seek(0)
return fd
class Delimiters(X12fileTestCase):
def test_arbitrary_delimiters(self):
str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
str1 += 'GS&HC&ZZ000&ZZ001&20030828&1128&17&X&004010X098A1+\n'
str1 += 'ST&837&11280001+\n'
str1 += 'REF&87&004010X098A1+\n'
str1 += 'SE&3&11280001+\n'
str1 += 'GE&1&17+\n'
str1 += 'IEA&1&000010121+\n'
fd = self._makeFd(str1)
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments():
pass
self.assertEqual(src.subele_term, '!')
self.assertEqual(src.ele_term, '&')
self.assertEqual(src.seg_term, '+')
def test_binary_delimiters(self):
str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
str1 += 'GS&HC&ZZ000&ZZ001&20030828&1128&17&X&004010X098A1+\n'
str1 += 'ST&837&11280001+\n'
str1 += 'REF&87&004010X098A1+\n'
str1 += 'SE&3&11280001+\n'
str1 += 'GE&1&17+\n'
str1 += 'IEA&1&000010121+\n'
str1 = str1.replace('&', chr(0x1C))
str1 = str1.replace('+', chr(0x1D))
str1 = str1.replace('!', chr(0x1E))
fd = self._makeFd(str1)
errors = []
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments():
pass
self.assertEqual(src.subele_term, chr(0x1E))
self.assertEqual(src.ele_term, chr(0x1C))
self.assertEqual(src.seg_term, chr(0x1D))
class TreeGetValue(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_get_line_numbers_2200(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(self.loop2300.seg_count, 19)
self.assertEqual(self.loop2300.cur_line_number, 21)
for seg in loop2400.select('CLM'):
self.assertEqual(seg.seg_count, 25)
self.assertEqual(seg.cur_line_number, 2271)
break
def test_get_line_numbers_2400(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(loop2400.seg_count, 35)
self.assertEqual(loop2400.cur_line_number, 37)
for svc in loop2400.select('SV1'):
self.assertEqual(svc.seg_count, 36)
self.assertEqual(svc.cur_line_number, 38)
break
def test_get_seg_value(self):
self.assertEqual(self.loop2300.get_value('CLM02'), '21')
self.assertEqual(self.loop2300.get_value('CLM99'), None)
def test_get_seg_value_fail_no_element_index(self):
self.assertRaises(IndexError, self.loop2300.get_value, 'CLM')
def test_get_parent_value(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(loop2400.get_value('../CLM01'), '3215338')
self.assertEqual(loop2400.get_value('../2310B/NM109'), '222185735')
def test_get_seg_value_idx(self):
for clm in self.loop2300.select('CLM'):
self.assertEqual(clm.get_value('02'), '21')
self.assertEqual(clm.get_value('05-3'), '1')
def test_get_first_value(self):
self.assertEqual(self.loop2300.get_value('2400/SV101'), 'HC:H2015:TT')
self.assertEqual(self.loop2300.get_value('2400/SV101-2'), 'H2015')
self.assertEqual(self.loop2300.get_value('2400/REF[6R]02'), '1057296')
self.assertEqual(self.loop2300.get_value('2400/2430/SVD02'), '21')
self.assertEqual(self.loop2300.get_value('2400/AMT[AAE]02'), '21')
def test_get_first_value_2400(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(loop2400.get_value('AMT[AAE]02'), '21')
self.assertEqual(loop2400.get_value('2430/AMT[AAE]02'), None)
def test_get_no_value(self):
self.assertEqual(self.loop2300.get_value('2400/SV199'), None)
self.assertEqual(self.loop2300.get_value('2400'), None)
def test_get_parent_no_value(self):
loop2400 = self.loop2300.first('2400')
self.assertEqual(loop2400.get_value('../2310E/NM109'), None)
def test_get_specific_qual(self):
self.assertEqual(self.loop2300.get_value('2400/REF[6R]02'), '1057296')
self.assertEqual(self.loop2300.get_value('2400/REF[G1]02'), None)
self.assertEqual(self.loop2300.get_value('2400/REF[XX]02'), None)
class TreeSetValue(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_set_seg_value(self):
self.loop2300.set_value('CLM02', '50')
self.assertEqual(self.loop2300.get_value('CLM02'), '50')
def test_set_first_value_2400(self):
loop2400 = self.loop2300.first('2400')
loop2400.set_value('AMT[AAE]02', '25')
self.assertEqual(loop2400.get_value('AMT[AAE]02'), '25')
class TreeSelect(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
self.param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
#def test_select_loop_and_parent(self):
# loop2400 = self.loop2300.first('2400')
# assert loop2400.id == '2400', 'Not in 2400'
# ct = 0
# newtree = loop2400.parent
# for newtree in loop2400.select('../'):
# self.assertEqual(newtree.id, '2300')
# ct += 1
# self.assertEqual(ct, 1)
def test_select_loops(self):
ct = 0
for newtree in self.loop2300.select('2400'):
self.assertEqual(newtree.id, '2400')
ct += 1
self.assertEqual(ct, 2)
def test_select_seg(self):
ct = 0
for newtree in self.loop2300.select('2400/SV1'):
self.assertEqual(newtree.id, 'SV1')
self.assertEqual(newtree.get_value('SV102'), '21')
ct += 1
self.assertEqual(ct, 2)
def test_select_parent_seg(self):
loop2400 = self.loop2300.first('2400')
assert loop2400.id == '2400', 'Not in 2400'
ct = 0
for newtree in loop2400.select('../CLM'):
self.assertEqual(newtree.id, 'CLM')
self.assertEqual(newtree.get_value('CLM01'), '3215338')
ct += 1
self.assertEqual(ct, 1)
def test_select_from_st(self):
fd = self._makeFd(datafiles['835id']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
ct = 0
for datatree in src.iter_segments('ST_LOOP'):
if datatree.id == 'ST_LOOP':
for claim in datatree.select('DETAIL/2000/2100'):
self.assertEqual(claim.id, '2100')
ct += 1
self.assertEqual(
ct, 3, 'Found %i 2100 loops. Should have %i' % (ct, 3))
def test_select_from_gs(self):
fd = self._makeFd(datafiles['simple_837i']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
ct = 0
for datatree in src.iter_segments('GS_LOOP'):
if datatree.id == 'GS_LOOP':
for sub in datatree.select('ST_LOOP/DETAIL/2000A/2000B/2300/2400'):
self.assertEqual(sub.id, '2400')
ct += 1
self.assertEqual(
ct, 6, 'Found %i 2400 loops. Should have %i' % (ct, 6))
class TreeSelectFromSegment(X12fileTestCase):
def test_select_from_seg_fail(self):
fd = self._makeFd(datafiles['835id']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in src.iter_segments('ST_LOOP'):
if datatree.id == 'GS':
#self.assertFalseRaises(AttributeError, datatree.select, 'DETAIL/2000/2100')
for claim in datatree.select('DETAIL/2000/2100'):
pass
class TreeAddSegment(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_add_new_plain(self):
seg_data = pyx12.segment.Segment('HCP*00*7.11~', '~', '*', ':')
new_node = self.loop2300.add_segment(seg_data)
self.assertNotEqual(new_node, None)
def test_add_new_id(self):
seg_data = pyx12.segment.Segment('REF*F5*6.11~', '~', '*', ':')
new_node = self.loop2300.add_segment(seg_data)
self.assertNotEqual(new_node, None)
def test_add_new_not_exists(self):
seg_data = pyx12.segment.Segment('ZZZ*00~', '~', '*', ':')
self.assertRaises(pyx12.errors.X12PathError,
self.loop2300.add_segment, seg_data)
class TreeAddSegmentString(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_add_new_plain(self):
new_node = self.loop2300.add_segment('HCP*00*7.11~')
self.assertNotEqual(new_node, None)
def test_add_new_id(self):
new_node = self.loop2300.add_segment('REF*F5*6.11')
self.assertNotEqual(new_node, None)
def test_add_new_not_exists(self):
self.assertRaises(pyx12.errors.X12PathError,
self.loop2300.add_segment, 'ZZZ*00~')
class SegmentExists(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
self.param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_qual_segment(self):
self.assertTrue(self.loop2300.exists('2310B'))
self.assertTrue(self.loop2300.exists('2310B/NM1[82]'))
for loop2310b in self.loop2300.select('2310B'):
self.assertTrue(loop2310b.exists('NM1'))
self.assertTrue(loop2310b.exists('NM1[82]'))
def test_qual_segment_sub_loop(self):
self.assertTrue(self.loop2300.exists('2400/2430'))
self.assertTrue(self.loop2300.exists('2400/2430/DTP[573]'))
self.assertFalse(self.loop2300.exists('2400/2430/DTP[111]'))
self.assertTrue(self.loop2300.exists('2400/2430/DTP[573]03'))
def test_qual_segment_select_sub_loop(self):
loop2430 = self.loop2300.first('2400/2430')
self.assertTrue(loop2430.exists('DTP'))
self.assertTrue(loop2430.exists('DTP[573]'))
self.assertTrue(loop2430.exists('DTP[573]03'))
def test_qual_834_dtp(self):
fd = self._makeFd(datafiles['834_lui_id']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments('2300'):
if datatree.id == '2300':
loop2300 = datatree
break
self.assertTrue(loop2300.exists('DTP[348]'))
self.assertFalse(loop2300.exists('DTP[349]'))
class TreeAddLoop(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_add_new_plain(self):
seg_data = pyx12.segment.Segment(
'NM1*82*2*Provider 1*****ZZ*9898798~', '~', '*', ':')
new_node = self.loop2300.add_loop(seg_data)
self.assertNotEqual(new_node, None)
self.assertTrue(self.loop2300.exists('2310B'))
for loop2310b in self.loop2300.select('2310B'):
self.assertTrue(loop2310b.exists('NM1'))
self.assertTrue(loop2310b.exists('NM1[82]'))
def test_add_new_string_seg(self):
old_ct = self.loop2300.count('2400')
new_node = self.loop2300.add_loop('LX*5~')
self.assertNotEqual(new_node, None)
self.assertTrue(self.loop2300.exists('2400'))
self.assertEqual(old_ct + 1, self.loop2300.count('2400'))
for loop2400 in self.loop2300.select('2400'):
self.assertTrue(loop2400.exists('LX'))
class TreeAddLoopDetail(X12fileTestCase):
def test_add_loops_under_detail(self):
str1 = 'ISA&00& &00& &ZZ&ZZ000 &ZZ&ZZ001 &030828&1128&U&00401&000010121&0&T&!+\n'
str1 += 'GS&BE&ZZ000&ZZ001&20030828&1128&17&X&004010X095A1+\n'
str1 += 'ST&834&11280001+\n'
str1 += 'BGN&+\n'
str1 += 'INS&Y&18&30&XN&AE&RT+\n'
str1 += 'SE&4&11280001+\n'
str1 += 'GE&1&17+\n'
str1 += 'IEA&1&000010121+\n'
fd = self._makeFd(str1)
errors = []
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(param, errh, fd)
for st_loop in src.iter_segments('ST_LOOP'):
if st_loop.id == 'ST_LOOP' and st_loop.exists('DETAIL'):
detail = st_loop.first('DETAIL')
self.assertTrue(detail.exists('2000'))
detail.first('2000').delete()
self.assertFalse(detail.exists('2000'))
detail.add_loop('INS&Y&18&30&XN&AE&RT+')
self.assertTrue(detail.exists('2000'))
class TreeAddNode(X12fileTestCase):
def setUp(self):
self.param = pyx12.params.params()
def test_add_loop(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
loop2300 = datatree
break
self.assertEqual(self._get_count(loop2300, '2400'), 2)
for node in loop2300.select('2400'):
loop2300.add_node(node)
self.assertEqual(self._get_count(loop2300, '2400'), 4)
def test_add_segment(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
loop2300 = datatree
break
self.assertEqual(self._get_count(loop2300, 'CN1'), 1)
for node in loop2300.select('CN1'):
loop2300.add_node(node)
self.assertEqual(self._get_count(loop2300, 'CN1'), 2)
def test_fail(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
loop2300 = datatree
break
for node in loop2300.select('CN1'):
cn1 = node
break
n2400 = None
for node in loop2300.select('2400'):
n2400 = node
break
assert n2400 is not None, 'Loop 2400 was not matched'
self.assertRaises(pyx12.errors.X12PathError, n2400.add_node, cn1)
def _get_count(self, node, loop_id):
ct = 0
for n in node.select(loop_id):
ct += 1
return ct
class CountRepeatingLoop(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300' and datatree.get_value('CLM01') == '5555':
self.loop2300 = datatree
break
def test_repeat_2400(self):
ct = 0
for loop_2400 in self.loop2300.select('2400'):
ct += 1
self.assertEqual(
ct, 3, 'Found %i 2400 loops. Should have %i' % (ct, 3))
def test_repeat_2430(self):
ct = 0
for loop_2430 in self.loop2300.select('2400/2430'):
ct += 1
self.assertEqual(
ct, 0, 'Found %i 2430 loops. Should have %i' % (ct, 0))
class IterateTree(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
def test_iterate_all(self):
ct_2000a = 0
ct_other = 0
for datatree in self.src.iter_segments('2000A'):
if datatree.id == '2000A':
ct_2000a += 1
else:
ct_other += 1
self.assertEqual(ct_2000a, 1,
'Found %i 2000A loops. Should have %i' % (ct_2000a, 1))
self.assertEqual(ct_other, 11, 'Found %i external segments. Should have %i' % (ct_other, 11))
class TreeDeleteSegment(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_delete(self):
assert self.loop2300.get_value('CN101') == '05'
seg_data = pyx12.segment.Segment('CN1*05~', '~', '*', ':')
self.assertTrue(self.loop2300.delete_segment(seg_data))
self.assertEqual(self.loop2300.get_value('CN101'), None)
def test_delete_fail(self):
seg_data = pyx12.segment.Segment('HCP*00*7.11~', '~', '*', ':')
self.assertFalse(self.loop2300.delete_segment(seg_data))
class TreeDeleteLoop(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_delete(self):
self.assertEqual(self.loop2300.get_value('2400/LX01'), '1')
self.assertTrue(self.loop2300.delete_node('2400'))
self.assertEqual(self.loop2300.get_value('2400/LX01'), '2')
def test_delete_fail(self):
self.assertFalse(self.loop2300.delete_node('2500'))
class NodeDeleteSelf(X12fileTestCase):
def setUp(self):
fd = self._makeFd(datafiles['simple_837p']['source'])
param = pyx12.params.params()
errh = pyx12.error_handler.errh_null()
self.src = pyx12.x12context.X12ContextReader(param, errh, fd)
for datatree in self.src.iter_segments('2300'):
if datatree.id == '2300':
self.loop2300 = datatree
break
def test_delete(self):
cn1 = self.loop2300.first('CN1')
assert cn1.id == 'CN1'
cn1.delete()
try:
a = cn1.id
except EngineError:
pass
except:
a = cn1.id
#self.assertRaises(EngineError, cn1.id)
class TreeCopy(X12fileTestCase):
def setUp(self):
self.param = pyx12.params.params()
def test_add_node(self):
fd = self._makeFd(datafiles['835id']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments('2100'):
if datatree.id == '2100':
for svc in datatree.select('2110'):
new_svc = svc.copy()
new_svc.set_value('SVC01', 'XX:AAAAA')
self.assertTrue(not svc is new_svc)
datatree.add_node(new_svc)
#for svc in datatree.select('2110'):
# print svc.get_value('SVC01')
break
def test_copy_seg(self):
fd = self._makeFd(datafiles['835id']['source'])
errh = pyx12.error_handler.errh_null()
src = pyx12.x12context.X12ContextReader(self.param, errh, fd)
for datatree in src.iter_segments('2100'):
if datatree.id == '2100':
for svc in datatree.select('2110'):
new_svc = svc.copy()
self.assertFalse(svc is new_svc)
self.assertEqual(svc.get_value('SVC01'),
new_svc.get_value('SVC01'))
new_svc.set_value('SVC01', 'XX:AAAAA')
self.assertFalse(svc is new_svc)
self.assertNotEqual(svc.get_value('SVC01'),
new_svc.get_value('SVC01'))
break
| 38.210356
| 125
| 0.598247
| 2,840
| 23,614
| 4.833451
| 0.091549
| 0.062942
| 0.030961
| 0.036716
| 0.776717
| 0.700517
| 0.641801
| 0.609383
| 0.562468
| 0.517739
| 0
| 0.122283
| 0.269289
| 23,614
| 617
| 126
| 38.272285
| 0.673254
| 0.021343
| 0
| 0.582178
| 0
| 0.005941
| 0.109331
| 0.01719
| 0
| 0
| 0.001039
| 0
| 0.20396
| 1
| 0.126733
| false
| 0.007921
| 0.015842
| 0
| 0.182178
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
dbce1d6ebf5fac46543c3b47688a5f1e1c7cc668
| 8,981
|
py
|
Python
|
dmarc_storage.py
|
Schramp/dmarc-monitoring
|
619a162f71a788e81d92ca281ec0bdcf13c2e8e8
|
[
"MIT"
] | 1
|
2020-05-25T05:09:18.000Z
|
2020-05-25T05:09:18.000Z
|
dmarc_storage.py
|
Schramp/dmarc-monitoring
|
619a162f71a788e81d92ca281ec0bdcf13c2e8e8
|
[
"MIT"
] | 30
|
2019-08-12T05:10:50.000Z
|
2021-07-21T04:25:02.000Z
|
dmarc_storage.py
|
Schramp/dmarc-monitoring
|
619a162f71a788e81d92ca281ec0bdcf13c2e8e8
|
[
"MIT"
] | 1
|
2022-03-12T19:24:24.000Z
|
2022-03-12T19:24:24.000Z
|
import sqlite3
import os
import datetime
__all__ = ['DMARCStorage', 'totimestamp']
def totimestamp(datetime_object):
if datetime_object.utcoffset() is not None:
utc_naive = datetime_object.replace(tzinfo=None) - datetime_object.utcoffset()
else:
utc_naive = datetime_object
return (utc_naive - datetime.datetime(1970, 1, 1)).total_seconds()
class DMARCStorage(object):
def __init__(self, database_filename='dmarc.sqlite', database_directory="./results"):
# Create or connect to the database:
database_path = os.path.join(database_directory, database_filename)
if not os.path.exists(database_directory):
os.makedirs(database_directory)
self._conn = sqlite3.connect(database_path)
# Set automcommit to true and initialise cursor:
self._conn.isolation_level = None
self._cur = self._conn.cursor()
# Create the tables if they don't exist already:
self._init_database()
def __del__(self):
if self._conn is not None:
self._close_connection()
def _init_database(self):
self._cur.execute("PRAGMA foreign_keys = ON;")
self._cur.execute("""CREATE TABLE IF NOT EXISTS dmarc_reports (
report_id TEXT PRIMARY KEY,
receiver TEXT,
report_filename TEXT,
report_start INTEGER,
report_end INTEGER
);""")
self._cur.execute("""CREATE TABLE IF NOT EXISTS dmarc_records (
report_id TEXT REFERENCES dmarc_reports(report_id) ON DELETE CASCADE,
record_id INTEGER,
ip_address TEXT,
hostname TEXT,
disposition TEXT,
reason TEXT,
spf_pass INTEGER,
dkim_pass INTEGER,
header_from TEXT,
envelope_from TEXT,
count INTEGER,
PRIMARY KEY (report_id, record_id)
);""")
self._cur.execute("""CREATE TABLE IF NOT EXISTS spf_results (
report_id TEXT,
record_id INTEGER,
spf_id INTEGER,
domain TEXT,
result TEXT,
PRIMARY KEY (report_id, record_id, spf_id),
FOREIGN KEY (report_id, record_id)
REFERENCES dmarc_records(report_id, record_id)
ON DELETE CASCADE
);""")
self._cur.execute("""CREATE TABLE IF NOT EXISTS dkim_signatures (
report_id TEXT,
record_id INTEGER,
signature_id INTEGER,
domain TEXT,
result TEXT,
selector TEXT,
PRIMARY KEY (report_id, record_id, signature_id),
FOREIGN KEY (report_id, record_id)
REFERENCES dmarc_records(report_id, record_id)
ON DELETE CASCADE,
CONSTRAINT unique_dkim_sig
UNIQUE (report_id, record_id, domain, result, selector)
);""")
def _delete_all_data(self):
# Drop the tables in the right order:
self._cur.execute("DROP TABLE dkim_signatures;")
self._cur.execute("DROP TABLE spf_results;")
self._cur.execute("DROP TABLE dmarc_records;")
self._cur.execute("DROP TABLE dmarc_reports;")
# Recreate them again, empty:
self._init_database()
def _close_connection(self):
self._conn.close()
self._conn = None
def report_already_exists(self, report_filename):
# Check if a report with that filename already exists:
self._cur.execute("SELECT report_filename FROM dmarc_reports WHERE report_filename=?;", (report_filename,))
already_exists = self._cur.fetchone() is not None
return already_exists
def save_new_report(self, report):
# Persist the report itself:
self._cur.execute("INSERT INTO dmarc_reports VALUES (?,?,?,?,?);",
[report.id, report.receiver, report.filename,
totimestamp(report.start_date), totimestamp(report.end_date)])
# Persist each record of that report with a generated ID:
for rec_id, rec in enumerate(report.records):
self._cur.execute("INSERT INTO dmarc_records VALUES (?,?,?,?,?,?,?,?,?,?,?);",
[report.id, rec_id, rec.ip, rec.host, rec.disposition, rec.reason,
rec.spf_pass, rec.dkim_pass, rec.header_from, rec.envelope_from,
rec.count])
# Persist the SPF data:
for spf_id, spf_result in enumerate(rec.spf_results):
self._cur.execute("INSERT INTO spf_results VALUES (?,?,?,?,?);",
[report.id, rec_id, spf_id, spf_result["domain"], spf_result["result"]])
# Persist all the DKIM signatures with generated IDs
for sig_id, sig in enumerate(rec.dkim_signatures):
self._cur.execute("INSERT INTO dkim_signatures VALUES (?,?,?,?,?,?);",
[report.id, rec_id, sig_id, sig["domain"], sig["result"], sig["selector"]])
def get_reporting_start_date(self):
self._cur.execute("SELECT min(report_start) FROM dmarc_reports;")
return datetime.datetime.utcfromtimestamp(self._cur.fetchone()[0])
def get_reporting_end_date(self):
self._cur.execute("SELECT max(report_start) FROM dmarc_reports;")
return datetime.datetime.utcfromtimestamp(self._cur.fetchone()[0])
def get_number_reports(self):
self._cur.execute("SELECT count(*) FROM dmarc_reports;")
return self._cur.fetchone()[0]
def get_count_by_disposition(self):
self._cur.execute("SELECT disposition, sum(count) FROM dmarc_records GROUP BY disposition;")
return {str(r[0]): r[1] for r in self._cur.fetchall()}
def get_count_by_hostnames(self):
self._cur.execute("SELECT hostname, ip_address, sum(count) FROM dmarc_records GROUP BY hostname, ip_address;")
return {str(r[0]) if r[0] is not None else str(r[1]): r[2] for r in self._cur.fetchall()}
def get_count_by_receiver(self):
self._cur.execute("SELECT receiver, sum(count) FROM dmarc_reports JOIN dmarc_records " +
"ON dmarc_reports.report_id=dmarc_records.report_id GROUP BY receiver;")
return {str(r[0]): r[1] for r in self._cur.fetchall()}
def get_count_by_dkim_domain(self):
self._cur.execute("SELECT domain, sum(count) FROM dmarc_records JOIN dkim_signatures " +
"ON dmarc_records.report_id=dkim_signatures.report_id AND " +
"dmarc_records.record_id=dkim_signatures.record_id GROUP BY domain;")
return {str(r[0]): r[1] for r in self._cur.fetchall()}
def get_count_by_status_string(self):
self._cur.execute("SELECT spf_pass, dkim_pass, sum(count) FROM dmarc_records GROUP BY spf_pass, dkim_pass;")
status = {1: "pass", 0: "fail", None: "n/a"}
return {"SPF:%s, DKIM:%s" % (status[r[0]], status[r[1]]): r[2] for r in self._cur.fetchall()}
def get_raw_spf_status_count_by_timestamp(self):
self._cur.execute("SELECT report_start, spf_pass, count FROM dmarc_reports JOIN dmarc_records " +
"ON dmarc_reports.report_id=dmarc_records.report_id;")
return self._cur.fetchall()
def get_raw_dkim_status_count_by_timestamp(self):
self._cur.execute("SELECT report_start, dkim_pass, count FROM dmarc_reports JOIN dmarc_records " +
"ON dmarc_reports.report_id=dmarc_records.report_id;")
return self._cur.fetchall()
def get_raw_dmarc_status_count_by_timestamp(self):
self._cur.execute("SELECT report_start, spf_pass + dkim_pass, count " +
"FROM dmarc_reports JOIN dmarc_records " +
"ON dmarc_reports.report_id=dmarc_records.report_id;")
return self._cur.fetchall()
def execute_query(self, sql, values=None):
if values is not None:
self._cur.execute(sql, values)
else:
self._cur.execute(sql)
return self._cur.fetchall()
| 49.894444
| 118
| 0.565639
| 1,012
| 8,981
| 4.75
| 0.156126
| 0.059705
| 0.078635
| 0.044934
| 0.462867
| 0.370917
| 0.302476
| 0.270647
| 0.255669
| 0.23861
| 0
| 0.004403
| 0.342501
| 8,981
| 179
| 119
| 50.173184
| 0.809653
| 0.04465
| 0
| 0.227586
| 0
| 0
| 0.471
| 0.044929
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0.055172
| 0.02069
| 0
| 0.262069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
dbd0c614614154cd50e0792871e7aa778a2a1459
| 557
|
py
|
Python
|
setup.py
|
mcdruid/sumologic-python-sdk
|
cb1d649d0166976fb104866e9174a41bd558b817
|
[
"Apache-2.0"
] | 4
|
2019-05-09T01:31:15.000Z
|
2019-12-08T03:35:32.000Z
|
setup.py
|
blaise-sumo/sumologic-python-sdk
|
97c38fc2d493b94741fd17711923ec7e39264610
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
blaise-sumo/sumologic-python-sdk
|
97c38fc2d493b94741fd17711923ec7e39264610
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="sumologic-sdk",
version="0.1.9",
packages=find_packages(),
install_requires=['requests>=2.2.1'],
# PyPI metadata
author="Yoway Buorn, Melchi Salins",
author_email="[email protected], [email protected]",
description="Sumo Logic Python SDK",
license="PSF",
keywords="sumologic python sdk rest api log management analytics logreduce splunk security siem collector forwarder",
url="https://github.com/SumoLogic/sumologic-python-sdk",
zip_safe=True
)
| 32.764706
| 121
| 0.716338
| 71
| 557
| 5.549296
| 0.746479
| 0.068528
| 0.091371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012848
| 0.16158
| 557
| 16
| 122
| 34.8125
| 0.830835
| 0.023339
| 0
| 0
| 0
| 0
| 0.512915
| 0.042435
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
dbdce6502afcfa5e2708f1c6de7ac5e46b73c5d7
| 3,303
|
py
|
Python
|
template/misc.py
|
da-h/tf-boilerplate
|
ab8409c935d3fcbed07bbefd1cb0049d45283222
|
[
"MIT"
] | null | null | null |
template/misc.py
|
da-h/tf-boilerplate
|
ab8409c935d3fcbed07bbefd1cb0049d45283222
|
[
"MIT"
] | null | null | null |
template/misc.py
|
da-h/tf-boilerplate
|
ab8409c935d3fcbed07bbefd1cb0049d45283222
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.python.training.session_run_hook import SessionRunArgs
# Define data loaders #####################################
# See https://gist.github.com/peterroelants/9956ec93a07ca4e9ba5bc415b014bcca
class IteratorInitializerHook(tf.train.SessionRunHook):
"""Hook to initialise data iterator after Session is created."""
def __init__(self, func=None):
super(IteratorInitializerHook, self).__init__()
self.iterator_initializer_func = func
def after_create_session(self, session, coord):
"""Initialise the iterator after the session has been created."""
self.iterator_initializer_func(session)
# redefine summarysaverhook (for more accurate saving)
class CustomSummarySaverHook(tf.train.SummarySaverHook):
"""Saves summaries every N steps."""
def __init__(self,save_steps,*args,**kwargs):
super(CustomSummarySaverHook, self).__init__(*args,save_steps=save_steps,**kwargs)
def begin(self):
super().begin()
self._timer.reset()
self._iter_count = 0
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = ((self._iter_count + 1) % self.save_steps == 0)
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
# print(self._iter_count)
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
super().after_run(run_context,run_values)
self._iter_count += 1
class OneTimeSummarySaverHook(tf.train.SummarySaverHook):
"""One-Time SummarySaver
Saves summaries every N steps.
E.g. can be used for saving the source code as text.
"""
def __init__(self, output_dir=None, summary_writer=None, scaffold=None, summary_op=None):
self._summary_op = summary_op
self._summary_writer = summary_writer
self._output_dir = output_dir
self._scaffold = scaffold
class emptytimer():
def update_last_triggered_step(*args,**kwargs):
pass
self._timer = emptytimer()
def begin(self):
super().begin()
self._done = False
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = not self._done
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
# print(self._iter_count)
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
super().after_run(run_context,run_values)
self._done = True
def ExperimentTemplate() -> str:
"""A template with Markdown syntax.
:return: str with Markdown template
"""
return """
Experiment
==========
Any [markdown code](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) can be used to describe this experiment.
For instance, you can find the automatically generated used settings of this run below.
Current Settings
----------------
| Argument | Value |
| -------- | ----- |
"""
| 32.70297
| 126
| 0.666969
| 386
| 3,303
| 5.430052
| 0.34715
| 0.030057
| 0.031011
| 0.032443
| 0.337786
| 0.313931
| 0.289122
| 0.289122
| 0.289122
| 0.289122
| 0
| 0.008089
| 0.214048
| 3,303
| 100
| 127
| 33.03
| 0.799307
| 0.17802
| 0
| 0.338983
| 0
| 0.016949
| 0.134909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.20339
| false
| 0.016949
| 0.033898
| 0
| 0.355932
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
dbddc1c2c35c862c97e10c987a1255308c864f59
| 2,825
|
py
|
Python
|
examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py
|
TorgeirUstad/dlite
|
1d7b4ccec0e76799a25992534cd295a80d83878a
|
[
"MIT"
] | null | null | null |
examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py
|
TorgeirUstad/dlite
|
1d7b4ccec0e76799a25992534cd295a80d83878a
|
[
"MIT"
] | null | null | null |
examples/dehydrogenation/3-property-mappings/mappings_from_ontology/run_w_onto.py
|
TorgeirUstad/dlite
|
1d7b4ccec0e76799a25992534cd295a80d83878a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from typing import Dict, AnyStr
from pathlib import Path
from ontopy import get_ontology
import dlite
from dlite.mappings import make_instance
# Setup dlite paths
thisdir = Path(__file__).parent.absolute()
rootdir = thisdir.parent.parent
workflow1dir = rootdir / '1-simple-workflow'
entitiesdir = rootdir / 'entities'
atomdata = workflow1dir / 'atomscaledata.json'
dlite.storage_path.append(f'{entitiesdir}/*.json')
# Define the calculation
def get_energy(reaction):
"""Calculates reaction energies with data from Substance entity
data is harvested from collection and mapped to Substance according to
mappings.
Args:
reaction: dict with names of reactants and products ase keys
and stochiometric coefficient as value
Negative stochiometric coefficients for reactants.
Positive stochiometric coefficients for products.
Returns:
reaction energy
"""
energy = 0
for label, n in reaction.items():
inst = make_instance(Substance, coll[label], mappings,
mapsTo=mapsTo)
energy+=n*inst.molecule_energy
return energy
# Import ontologies with mappings
molecules_onto = get_ontology(f'{thisdir}/mapping_mols.ttl').load()
reaction_onto = get_ontology(f'{thisdir}/mapping_substance.ttl').load()
# Convert to mappings to a single list of triples
mappings = list(molecules_onto.get_unabbreviated_triples())
mappings.extend(list(reaction_onto.get_unabbreviated_triples()))
# Obtain the Metadata to be mapped to each other
Molecule = dlite.get_instance('http://onto-ns.com/meta/0.1/Molecule')
Substance = dlite.get_instance('http://onto-ns.com/meta/0.1/Substance')
# Find mapping relation
# TODO: investigate what to do if the two cases
# use a different mappings relation. As of now it is a
# hard requirement that they use the same.
mapsTo = molecules_onto.mapsTo.iri
# Define where the molecule data is obtained from
# This is a dlite collection
coll = dlite.Collection(f'json://{atomdata}?mode=r#molecules', 0)
# input from chemical engineer, e.g. what are reactants and products
# reactants (left side of equation) have negative stochiometric coefficient
# products (right side of equation) have positive stochiometric coefficient
reaction1 = {'C2H6':-1, 'C2H4':1,'H2':1}
reaction_energy = get_energy(reaction1)
print('Reaction energy 1', reaction_energy)
reaction2 = {'C3H8':-1, 'H2': -2,'CH4':3}
reaction_energy2 = get_energy(reaction2)
print('Reaction energy 1', reaction_energy2)
# Map instance Molecule with label 'H2' to Substance
#inst = make_instance(Substance, coll['H2'], mappings)
#print(inst)
# Map instance Molecule with label 'H2' to itself
#inst2 = make_instance(Molecule, coll['H2'], mappings, strict=False)
#print(inst2)
| 31.388889
| 75
| 0.735929
| 377
| 2,825
| 5.427056
| 0.413793
| 0.034213
| 0.01955
| 0.024438
| 0.150538
| 0.094819
| 0.065494
| 0.034213
| 0.034213
| 0.034213
| 0
| 0.016617
| 0.169204
| 2,825
| 89
| 76
| 31.741573
| 0.855134
| 0.463009
| 0
| 0
| 0
| 0
| 0.193103
| 0.062759
| 0
| 0
| 0
| 0.011236
| 0
| 1
| 0.03125
| false
| 0
| 0.15625
| 0
| 0.21875
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91551c7d6fac7874ebf8acc4dfa5dfb4b2e853a5
| 6,479
|
py
|
Python
|
forms.py
|
lendoo73/my_idea_boxes
|
c0d0e7bbd0b64ae35146f3792cd477d1ec8461b5
|
[
"MIT"
] | null | null | null |
forms.py
|
lendoo73/my_idea_boxes
|
c0d0e7bbd0b64ae35146f3792cd477d1ec8461b5
|
[
"MIT"
] | null | null | null |
forms.py
|
lendoo73/my_idea_boxes
|
c0d0e7bbd0b64ae35146f3792cd477d1ec8461b5
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, PasswordField, BooleanField, TextAreaField, SubmitField, RadioField, HiddenField
from wtforms.fields.html5 import DateField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, NumberRange
from models import Colleagues, Admins, Boxes, Ideas
class RegistrationFormCompany(FlaskForm):
company_name = StringField("Company name", validators = [DataRequired()])
user_name = StringField("Your User name", validators = [DataRequired()])
first_name = StringField("Your First name", validators = [DataRequired()])
last_name = StringField("Your Last name", validators = [DataRequired()])
position = StringField("Your Position", validators = [DataRequired()])
email = StringField("Email", validators = [DataRequired(), Email()])
founder_password = PasswordField("Your own Password", validators = [DataRequired()])
repeat_founder_password = PasswordField(
"Repeat Your Password",
validators = [DataRequired(),
EqualTo("founder_password")]
)
joining_password = PasswordField("Password for Colleagues to Joining", validators = [DataRequired()])
repeat_joining_password = PasswordField(
"Repeat Joining Password",
validators = [DataRequired(),
EqualTo("joining_password")]
)
submit = SubmitField("Register your Company")
class RegistrationFormColleague(FlaskForm):
company_name = StringField("Company name", validators = [DataRequired()])
joining_password = PasswordField("Password for Colleagues to Joining", validators = [DataRequired()])
user_name = StringField("Your User name", validators = [DataRequired()])
email = StringField("Email", validators = [DataRequired(), Email()])
first_name = StringField("Your First name", validators = [DataRequired()])
last_name = StringField("Your Last name", validators = [DataRequired()])
position = StringField("Your Position", validators = [DataRequired()])
password = PasswordField("Your Password", validators = [DataRequired()])
repeat_password = PasswordField(
"Repeat Password",
validators = [DataRequired(),
EqualTo("password")]
)
submit = SubmitField("Register")
class LoginForm(FlaskForm):
email_or_user_name = StringField("Email or User name", validators = [DataRequired()])
password = PasswordField("Password", validators = [DataRequired()])
remember_me = BooleanField("Remember Me")
submit = SubmitField("Sign In")
class ConfirmEmailForm(FlaskForm):
email = HiddenField("Email")
code = IntegerField(
"Confirmation code",
validators = [
DataRequired(),
NumberRange(
min = 100000,
max = 999999,
message = "Please enter the 6 digits you received in the email."
)
]
)
submit = SubmitField("Confirm my Email")
class UpdateFirstNameForm(FlaskForm):
first_name = StringField("First Name", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdateLastNameForm(FlaskForm):
last_name = StringField("Last Name", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdateEmailForm(FlaskForm):
email = StringField("Email", validators = [DataRequired(), Email()])
password = PasswordField("Password", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdatePositionForm(FlaskForm):
position = StringField("Your Position", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdatePasswordForm(FlaskForm):
password = PasswordField("Your Current Password", validators = [DataRequired()])
new_password = PasswordField("Your New Password", validators = [DataRequired()])
repeat_new_password = PasswordField(
"Repeat your New Password",
validators = [DataRequired(),
EqualTo("repeat_new_password")]
)
submit = SubmitField("Update")
allowed_format = ['png', 'svg', 'jpg', "jpeg"]
class UpdateAvatarForm(FlaskForm):
avatar = FileField(
"Choose an Avatar:",
validators = [
FileRequired(),
FileAllowed(allowed_format, f"Wrong format! Allowed: {allowed_format}.")
]
)
submit = SubmitField("Upload Avatar")
class DeleteColleagueForm(FlaskForm):
password = PasswordField("Your Password", validators = [DataRequired()])
submit = SubmitField("Delete Registration")
class UpdateLogoForm(FlaskForm):
logo = FileField(
"Choose your Company Logo:",
validators = [
FileRequired(),
FileAllowed(allowed_format, f"Wrong format! Allowed: {allowed_format}.")
]
)
submit = SubmitField("Upload Logo")
class UpdateCompanyNameForm(FlaskForm):
company_name = StringField("Company Name", validators = [DataRequired()])
submit = SubmitField("Update")
class UpdateJoiningPasswordForm(FlaskForm):
password = PasswordField("Current Joining Password", validators = [DataRequired()])
new_password = PasswordField("New Joining Password", validators = [DataRequired()])
repeat_new_password = PasswordField(
"Repeat New Password",
validators = [DataRequired(),
EqualTo("repeat_new_password")]
)
submit = SubmitField("Update")
class UpdatePrivilegsForm(FlaskForm):
update_company = BooleanField("Update Company")
update_privilegs = BooleanField("Update Privilegs")
update_colleague = BooleanField("Update Colleague")
update_box = BooleanField("Update Idea Box")
password = PasswordField("Your Password", validators = [DataRequired()])
submit = SubmitField("Update Privilegs")
class CreateBoxForm(FlaskForm):
name = StringField("Title", validators = [DataRequired()])
description = TextAreaField("Description", validators = [DataRequired()])
close_at = DateField("Close at", format = "%Y-%m-%d")
submit = SubmitField("Create Box")
class CreateIdeaForm(FlaskForm):
idea = TextAreaField("My Idea", validators= [DataRequired()])
sign = RadioField(
"Sign",
choices = [
("incognito", "incognito"),
("username", "username"),
("first name", "first name"),
("full name", "full name")
]
)
submit = SubmitField("Share my Idea")
| 40.49375
| 113
| 0.679426
| 567
| 6,479
| 7.680776
| 0.215168
| 0.197015
| 0.10333
| 0.062687
| 0.479679
| 0.46062
| 0.378875
| 0.347646
| 0.223192
| 0.223192
| 0
| 0.002705
| 0.201111
| 6,479
| 160
| 114
| 40.49375
| 0.838679
| 0
| 0
| 0.314286
| 0
| 0
| 0.183025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.214286
| 0.042857
| 0
| 0.635714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
915a53aa4a7088b23b53c3227ab2635547e8ba50
| 1,593
|
py
|
Python
|
setup.py
|
abhiomkar/couchdbkit
|
035062b504b57c1cc6e576be47fb05423fb1ddb3
|
[
"MIT"
] | 1
|
2021-06-03T21:34:38.000Z
|
2021-06-03T21:34:38.000Z
|
setup.py
|
abhiomkar/couchdbkit
|
035062b504b57c1cc6e576be47fb05423fb1ddb3
|
[
"MIT"
] | null | null | null |
setup.py
|
abhiomkar/couchdbkit
|
035062b504b57c1cc6e576be47fb05423fb1ddb3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -
#
# This file is part of couchdbkit released under the MIT license.
# See the NOTICE for more information.
import os
import sys
if not hasattr(sys, 'version_info') or sys.version_info < (2, 5, 0, 'final'):
raise SystemExit("couchdbkit requires Python 2.5 or later.")
from setuptools import setup, find_packages
from couchdbkit import __version__
setup(
name = 'couchdbkit',
version = __version__,
description = 'Python couchdb kit',
long_description = file(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read(),
author = 'Benoit Chesneau',
author_email = '[email protected]',
license = 'Apache License 2',
url = 'http://couchdbkit.org',
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = find_packages(exclude=['tests']),
zip_safe = False,
install_requires = [
'restkit>=3.2',
],
entry_points="""
[couchdbkit.consumers]
sync=couchdbkit.consumer.sync:SyncConsumer
eventlet=couchdbkit.consumer.ceventlet:EventletConsumer
gevent=couchdbkit.consumer.cgevent:GeventConsumer
""",
test_suite='noses',
)
| 27
| 77
| 0.626491
| 165
| 1,593
| 5.915152
| 0.672727
| 0.055328
| 0.028689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008403
| 0.252982
| 1,593
| 58
| 78
| 27.465517
| 0.811765
| 0.075957
| 0
| 0.045455
| 0
| 0
| 0.466258
| 0.114519
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
9163be87e7924e53bd340c783bc5110d591ba91f
| 1,386
|
py
|
Python
|
fairseq/scoring/__init__.py
|
fairseq-FT/fairseq
|
18725499144c1bba7c151b796ba774e59d36eaa9
|
[
"MIT"
] | 33
|
2021-01-06T18:03:55.000Z
|
2022-03-28T12:07:44.000Z
|
fairseq/scoring/__init__.py
|
fairseq-FT/fairseq
|
18725499144c1bba7c151b796ba774e59d36eaa9
|
[
"MIT"
] | 8
|
2021-06-11T03:11:37.000Z
|
2022-03-08T19:15:42.000Z
|
fairseq/scoring/__init__.py
|
fairseq-FT/fairseq
|
18725499144c1bba7c151b796ba774e59d36eaa9
|
[
"MIT"
] | 14
|
2021-05-17T06:55:01.000Z
|
2022-03-28T12:07:42.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from abc import ABC, abstractmethod
from fairseq import registry
from omegaconf import DictConfig
class BaseScorer(ABC):
def __init__(self, cfg):
self.cfg = cfg
self.ref = []
self.pred = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.pred.append(pred)
@abstractmethod
def score(self) -> float:
pass
@abstractmethod
def result_string(self) -> str:
pass
_build_scorer, register_scorer, SCORER_REGISTRY, _ = registry.setup_registry(
"--scoring", default="bleu"
)
def build_scorer(choice, tgt_dict):
if isinstance(choice, DictConfig):
choice = choice._name
if choice == "bleu":
from fairseq.scoring import bleu
return bleu.Scorer(
bleu.BleuConfig(pad=tgt_dict.pad(), eos=tgt_dict.eos(), unk=tgt_dict.unk())
)
return _build_scorer(choice)
# automatically import any Python files in the current directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
module = file[: file.find(".py")]
importlib.import_module("fairseq.scoring." + module)
| 24.315789
| 87
| 0.665945
| 178
| 1,386
| 5.039326
| 0.460674
| 0.031215
| 0.024526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229437
| 1,386
| 56
| 88
| 24.75
| 0.839888
| 0.166667
| 0
| 0.114286
| 0
| 0
| 0.034813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.057143
| 0.2
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
916d6d6dc88be47cd9a443a50f8be165dfb36ec7
| 3,167
|
py
|
Python
|
io_import_rbsp/rbsp/rpak_materials.py
|
snake-biscuits/io_import_rbsp
|
0de47dc70c373cc0417cc222d5d83e6dde72068b
|
[
"MIT"
] | 7
|
2021-09-30T11:13:00.000Z
|
2022-03-25T16:19:19.000Z
|
io_import_rbsp/rbsp/rpak_materials.py
|
snake-biscuits/io_import_rbsp
|
0de47dc70c373cc0417cc222d5d83e6dde72068b
|
[
"MIT"
] | 1
|
2021-11-15T18:36:51.000Z
|
2021-11-15T18:36:51.000Z
|
io_import_rbsp/rbsp/rpak_materials.py
|
snake-biscuits/io_import_rbsp
|
0de47dc70c373cc0417cc222d5d83e6dde72068b
|
[
"MIT"
] | null | null | null |
# by MrSteyk & Dogecore
# TODO: extraction instructions & testing
import json
import os.path
from typing import List
import bpy
loaded_materials = {}
MATERIAL_LOAD_PATH = "" # put your path here
# normal has special logic
MATERIAL_INPUT_LINKING = {
"color": "Base Color",
"rough": "Roughness",
"spec": "Specular",
"illumm": "Emission",
}
def load_material_data_from_name(subpath):
full_path = MATERIAL_LOAD_PATH + subpath + ".json"
if not os.path.isfile(full_path):
return False
return json.load(open(full_path, "rb"))
def load_image_from_subpath(subpath):
full_path = MATERIAL_LOAD_PATH + subpath
if not os.path.isfile(full_path):
return False
return bpy.data.images.load(full_path)
def load_materials(bsp) -> List[bpy.types.Material]:
materials = []
for material_name in bsp.TEXTURE_DATA_STRING_DATA:
if material_name in loaded_materials:
materials.append(loaded_materials[material_name])
continue
mat_data = load_material_data_from_name(material_name)
material = bpy.data.materials.new("materials/" + material_name)
if not mat_data:
loaded_materials[material_name] = material
materials.append(material)
# raise ValueError(f"Material data for material {material_name} does not exist!")
continue
# print(material_name, mat_data)
material.use_nodes = True
bsdf = material.node_tree.nodes["Principled BSDF"]
# data link
for mat_data_entry in MATERIAL_INPUT_LINKING.keys():
texture_file = mat_data[mat_data_entry]
if texture_file == "":
print(f"Texture type {mat_data_entry} doesn't exist in {material_name}'s material data, skipping.")
continue
img = load_image_from_subpath(texture_file)
if not img:
raise ValueError(f"{material_name}'s texture {texture_file} ({mat_data_entry}) doesn't exist!")
continue
tex = material.node_tree.nodes.new("ShaderNodeTexImage")
tex.image = img
material.node_tree.links.new(bsdf.inputs[MATERIAL_INPUT_LINKING[mat_data_entry]], tex.outputs["Color"])
if mat_data_entry == "color":
material.node_tree.links.new(bsdf.inputs["Alpha"], tex.outputs["Alpha"])
# normal link
if mat_data["normal"] != "":
texture_file = mat_data["normal"]
normalmap = material.node_tree.nodes.new("ShaderNodeNormalMap")
img = load_image_from_subpath(texture_file)
if not img:
raise ValueError(f"Texture {texture_file} for material {material_name} (normal) doesn't exist!")
continue
tex = material.node_tree.nodes.new("ShaderNodeTexImage")
tex.image = img
material.node_tree.links.new(normalmap.inputs["Color"], tex.outputs["Color"])
material.node_tree.links.new(bsdf.inputs["Normal"], normalmap.outputs["Normal"])
loaded_materials[material_name] = material
materials.append(material)
return materials
| 38.156627
| 115
| 0.649826
| 382
| 3,167
| 5.151832
| 0.23822
| 0.073171
| 0.065041
| 0.042683
| 0.398882
| 0.362297
| 0.344512
| 0.300813
| 0.202236
| 0.202236
| 0
| 0
| 0.250079
| 3,167
| 82
| 116
| 38.621951
| 0.828632
| 0.07515
| 0
| 0.328125
| 0
| 0
| 0.14863
| 0
| 0
| 0
| 0
| 0.012195
| 0
| 1
| 0.046875
| false
| 0
| 0.0625
| 0
| 0.1875
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
916f9138f4bbb1766481eef3ea77cac318445838
| 3,291
|
py
|
Python
|
aardvark/conf/reaper_conf.py
|
ttsiouts/aardvark
|
cbf29f332df86814dd581152faf863c0d29ae41c
|
[
"Apache-2.0"
] | null | null | null |
aardvark/conf/reaper_conf.py
|
ttsiouts/aardvark
|
cbf29f332df86814dd581152faf863c0d29ae41c
|
[
"Apache-2.0"
] | null | null | null |
aardvark/conf/reaper_conf.py
|
ttsiouts/aardvark
|
cbf29f332df86814dd581152faf863c0d29ae41c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
reaper_group = cfg.OptGroup(
'reaper',
title='Aardvark Service Options',
help="Configuration options for Aardvark service")
reaper_opts = [
cfg.StrOpt('reaper_driver',
default='chance_driver',
help="""
The driver that the reaper will use
Possible choices:
* strict_driver: The purpose of the preemptibles existence is to eliminate the
idling resources. This driver gets all the possible offers
from the relevant hosts and tries to find the best matching
for the requested resources. The best matching offer is the
combination of preemptible servers that leave the least
possible resources unused.
* chance_driver: A valid host is selected randomly and in a number of
preconfigured retries, the driver tries to find the instances
that have to be culled in order to have the requested
resources available.
"""
),
cfg.IntOpt('alternatives',
default=1,
help="""
The number of alternative slots that the the reaper will try to free up for
each requested slot.
"""
),
cfg.IntOpt('max_attempts',
default=5,
help="""
The number of alternative slots that the the reaper will try to free up for
each requested slot.
"""
),
cfg.ListOpt('watched_aggregates',
default=[],
help="""
The list of aggregate names that the reaper will try to make space to
Each element of the list can be an aggregate or a combination of aggregates.
Combination of aggregates is a single string with a vertical-line-separated
aggregate names.
e.g. watched_aggregates={agg_name1},{agg_name2}|{agg_name3}',....
For each element in the list, a reaper thread will be spawned and the request
will be forwarded to the responsible worker.
If the provided list is empty, only one worker will be spawned, responsible for
the whole system.
"""
),
cfg.StrOpt('job_backend',
default='redis',
choices=('redis', 'zookeeper'),
help="""
The backend to use for distributed task management.
For this purpose the Reaper uses OpenStack Taskflow. The two supported
backends are redis and zookeper.
"""
),
cfg.StrOpt('backend_host',
default='localhost',
help="""
Specifies the host where the job board backend can be found.
"""
),
]
def register_opts(conf):
conf.register_group(reaper_group)
conf.register_opts(reaper_opts, group=reaper_group)
| 32.91
| 79
| 0.671832
| 439
| 3,291
| 4.98861
| 0.448747
| 0.027397
| 0.023744
| 0.021918
| 0.085845
| 0.077626
| 0.077626
| 0.077626
| 0.077626
| 0.077626
| 0
| 0.005337
| 0.259799
| 3,291
| 99
| 80
| 33.242424
| 0.893678
| 0.192343
| 0
| 0.314286
| 0
| 0
| 0.711691
| 0.031404
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0
| 0.014286
| 0
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
9170343444c1172d149626528603249b2f63831c
| 370
|
py
|
Python
|
count_files.py
|
xuannianc/keras-retinanet
|
d1da39592042927aaf3b3eb905a308c327983bed
|
[
"Apache-2.0"
] | null | null | null |
count_files.py
|
xuannianc/keras-retinanet
|
d1da39592042927aaf3b3eb905a308c327983bed
|
[
"Apache-2.0"
] | null | null | null |
count_files.py
|
xuannianc/keras-retinanet
|
d1da39592042927aaf3b3eb905a308c327983bed
|
[
"Apache-2.0"
] | null | null | null |
import csv
vat_filenames = set()
train_csv_filename = 'train_annotations.csv'
val_csv_filename = 'val_annotations.csv'
for csv_filename in [train_csv_filename, val_csv_filename]:
for line in csv.reader(open(csv_filename)):
vat_filename = line[0].split('/')[-1]
vat_filenames.add(vat_filename)
print(len(vat_filenames))
vat_filenames.clear()
| 30.833333
| 59
| 0.735135
| 53
| 370
| 4.792453
| 0.396226
| 0.259843
| 0.125984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006349
| 0.148649
| 370
| 11
| 60
| 33.636364
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.110811
| 0.056757
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.1
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91708273d963214e9092983f15d8ef3340677e15
| 814
|
py
|
Python
|
em Python/Roteiro7/Roteiro7__testes_dijkstra.py
|
GuilhermeEsdras/Grafos
|
b6556c3d679496d576f65b798a1a584cd73e40f4
|
[
"MIT"
] | null | null | null |
em Python/Roteiro7/Roteiro7__testes_dijkstra.py
|
GuilhermeEsdras/Grafos
|
b6556c3d679496d576f65b798a1a584cd73e40f4
|
[
"MIT"
] | null | null | null |
em Python/Roteiro7/Roteiro7__testes_dijkstra.py
|
GuilhermeEsdras/Grafos
|
b6556c3d679496d576f65b798a1a584cd73e40f4
|
[
"MIT"
] | null | null | null |
from Roteiro7.Roteiro7__funcoes import GrafoComPesos
# .:: Arquivo de Testes do Algoritmo de Dijkstra ::. #
# --------------------------------------------------------------------------- #
grafo_aula = GrafoComPesos(
['E', 'A', 'B', 'C', 'D'],
{
'E-A': 1,
'E-C': 10,
'A-B': 2,
'B-C': 4,
'C-D': 3
}
)
print(grafo_aula)
print('Menor caminho por Dijkstra: ', grafo_aula.dijkstra('E', 'D'))
print("-------------------------")
grafo_aula2 = GrafoComPesos(
['A', 'B', 'C', 'D', 'E', 'F', 'G'],
{
'A-B': 1, 'A-F': 3, 'A-G': 2,
'B-F': 1,
'C-B': 2,
'C-D': 5,
'D-E': 2,
'F-D': 4,
'F-G': 2,
'G-E': 7,
}
)
print(grafo_aula2)
print('Menor caminho por Dijkstra: ', grafo_aula2.dijkstra('A', 'E'))
| 22.611111
| 79
| 0.395577
| 103
| 814
| 3.048544
| 0.31068
| 0.025478
| 0.10828
| 0.025478
| 0.242038
| 0.210191
| 0
| 0
| 0
| 0
| 0
| 0.035413
| 0.271499
| 814
| 35
| 80
| 23.257143
| 0.494098
| 0.15602
| 0
| 0
| 0
| 0
| 0.208517
| 0.036711
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.034483
| 0.172414
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
917a6b3b8a05d7c695e7c6d3cb38a9324f5ab905
| 302
|
py
|
Python
|
mol/data/reader.py
|
TzuTingWei/mol
|
9499925443f389d8e960b6d656f2953d21df3e3b
|
[
"MIT"
] | null | null | null |
mol/data/reader.py
|
TzuTingWei/mol
|
9499925443f389d8e960b6d656f2953d21df3e3b
|
[
"MIT"
] | null | null | null |
mol/data/reader.py
|
TzuTingWei/mol
|
9499925443f389d8e960b6d656f2953d21df3e3b
|
[
"MIT"
] | null | null | null |
import os
from mol.util import read_xyz
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, 'look_and_say.dat')
with open(filename, 'r') as handle:
look_and_say = handle.read()
def get_molecule(filename):
return read_xyz(os.path.join(dirname, filename + ".xyz"))
| 25.166667
| 58
| 0.748344
| 49
| 302
| 4.387755
| 0.530612
| 0.111628
| 0.12093
| 0.15814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109272
| 302
| 11
| 59
| 27.454545
| 0.799257
| 0
| 0
| 0
| 0
| 0
| 0.069536
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0.125
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 1
|
917b8eb1f8726a411ad6e99afecc5eaca421cc08
| 1,793
|
py
|
Python
|
misc/python/mango/application/main_driver/logstream.py
|
pymango/pymango
|
b55f831f0194b214e746b2dfb4d9c6671a1abc38
|
[
"BSD-2-Clause"
] | 3
|
2020-05-11T03:23:17.000Z
|
2021-03-16T09:01:48.000Z
|
misc/python/mango/application/main_driver/logstream.py
|
pymango/pymango
|
b55f831f0194b214e746b2dfb4d9c6671a1abc38
|
[
"BSD-2-Clause"
] | null | null | null |
misc/python/mango/application/main_driver/logstream.py
|
pymango/pymango
|
b55f831f0194b214e746b2dfb4d9c6671a1abc38
|
[
"BSD-2-Clause"
] | 2
|
2017-03-04T11:03:40.000Z
|
2020-08-01T10:01:36.000Z
|
__doc__ = \
"""
=======================================================================================
Main-driver :obj:`LogStream` variables (:mod:`mango.application.main_driver.logstream`)
=======================================================================================
.. currentmodule:: mango.application.main_driver.logstream
Logging objects/attributes for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Classes
=======
.. autosummary::
:toctree: generated/
LogStream - Message logging for :obj:`mango.application.main_driver.MainDriverFilter` filters.
Attributes
==========
.. autodata:: log
.. autodata:: mstLog
.. autodata:: mstOut
.. autodata:: warnLog
.. autodata:: errLog
"""
import mango
import mango.mpi as mpi
import os
import os.path
import sys
if sys.platform.startswith('linux'):
import DLFCN as dl
_flags = sys.getdlopenflags()
sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
from . import _mango_main_driver as _mango_main_driver_so
sys.setdlopenflags(_flags)
else:
from . import _mango_main_driver as _mango_main_driver_so
from mango.core import LogStream
#: Messages sent to stdout, prefixed with :samp:`'P<RANK>'`, where :samp:`<RANK>` is MPI process world rank.
log = _mango_main_driver_so._log
#: Messages sent to stdout, prefixed with :samp:`'MST'`, and messages also saved to history-meta-data.
mstLog = _mango_main_driver_so._mstLog
#: Messages sent to stdout, prefixed with :samp:`'OUT'`.
mstOut = _mango_main_driver_so._mstOut
#: Messages sent to stderr, prefixed with :samp:`'WARNING'`.
warnLog = _mango_main_driver_so._warnLog
#: Messages sent to stderr, prefixed with :samp:`'ERROR'`.
errLog = _mango_main_driver_so._errLog
__all__ = [s for s in dir() if not s.startswith('_')]
| 25.985507
| 108
| 0.665365
| 215
| 1,793
| 5.288372
| 0.362791
| 0.123131
| 0.118734
| 0.104661
| 0.394019
| 0.332454
| 0.332454
| 0.174142
| 0.077397
| 0.077397
| 0
| 0
| 0.126603
| 1,793
| 68
| 109
| 26.367647
| 0.726054
| 0.211378
| 0
| 0.095238
| 0
| 0
| 0.008721
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.428571
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
917c31411ccb8a75122b971cca9ce661e5940151
| 9,680
|
py
|
Python
|
ucdev/cy7c65211/header.py
|
luftek/python-ucdev
|
8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9
|
[
"MIT"
] | 11
|
2015-07-08T01:28:01.000Z
|
2022-01-26T14:29:47.000Z
|
ucdev/cy7c65211/header.py
|
luftek/python-ucdev
|
8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9
|
[
"MIT"
] | 5
|
2017-12-07T15:04:00.000Z
|
2021-06-02T14:47:14.000Z
|
ucdev/cy7c65211/header.py
|
tai/python-ucdev
|
8d3c46d25551f1237e6a2f7a90d54c24bcb1d4f9
|
[
"MIT"
] | 4
|
2017-02-18T18:20:13.000Z
|
2022-03-23T16:21:20.000Z
|
# -*- coding: utf-8-unix -*-
import platform
######################################################################
# Platform specific headers
######################################################################
if platform.system() == 'Linux':
src = """
typedef bool BOOL;
"""
######################################################################
# Common headers
######################################################################
src += """
#define CY_STRING_DESCRIPTOR_SIZE 256
#define CY_MAX_DEVICE_INTERFACE 5
#define CY_US_VERSION_MAJOR 1
#define CY_US_VERSION_MINOR 0
#define CY_US_VERSION_PATCH 0
#define CY_US_VERSION 1
#define CY_US_VERSION_BUILD 74
typedef unsigned int UINT32;
typedef unsigned char UINT8;
typedef unsigned short UINT16;
typedef char CHAR;
typedef unsigned char UCHAR;
typedef void* CY_HANDLE;
typedef void (*CY_EVENT_NOTIFICATION_CB_FN)(UINT16 eventsNotified);
typedef struct _CY_VID_PID {
UINT16 vid;
UINT16 pid;
} CY_VID_PID, *PCY_VID_PID;
typedef struct _CY_LIBRARY_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patch;
UINT8 buildNumber;
} CY_LIBRARY_VERSION, *PCY_LIBRARY_VERSION;
typedef struct _CY_FIRMWARE_VERSION {
UINT8 majorVersion;
UINT8 minorVersion;
UINT16 patchNumber;
UINT32 buildNumber;
} CY_FIRMWARE_VERSION, *PCY_FIRMWARE_VERSION;
typedef enum _CY_DEVICE_CLASS{
CY_CLASS_DISABLED = 0,
CY_CLASS_CDC = 0x02,
CY_CLASS_PHDC = 0x0F,
CY_CLASS_VENDOR = 0xFF
} CY_DEVICE_CLASS;
typedef enum _CY_DEVICE_TYPE {
CY_TYPE_DISABLED = 0,
CY_TYPE_UART,
CY_TYPE_SPI,
CY_TYPE_I2C,
CY_TYPE_JTAG,
CY_TYPE_MFG
} CY_DEVICE_TYPE;
typedef enum _CY_DEVICE_SERIAL_BLOCK
{
SerialBlock_SCB0 = 0,
SerialBlock_SCB1,
SerialBlock_MFG
} CY_DEVICE_SERIAL_BLOCK;
typedef struct _CY_DEVICE_INFO {
CY_VID_PID vidPid;
UCHAR numInterfaces;
UCHAR manufacturerName [256];
UCHAR productName [256];
UCHAR serialNum [256];
UCHAR deviceFriendlyName [256];
CY_DEVICE_TYPE deviceType [5];
CY_DEVICE_CLASS deviceClass [5];
CY_DEVICE_SERIAL_BLOCK deviceBlock;
} CY_DEVICE_INFO,*PCY_DEVICE_INFO;
typedef struct _CY_DATA_BUFFER {
UCHAR *buffer;
UINT32 length;
UINT32 transferCount;
} CY_DATA_BUFFER,*PCY_DATA_BUFFER;
typedef enum _CY_RETURN_STATUS{
CY_SUCCESS = 0,
CY_ERROR_ACCESS_DENIED,
CY_ERROR_DRIVER_INIT_FAILED,
CY_ERROR_DEVICE_INFO_FETCH_FAILED,
CY_ERROR_DRIVER_OPEN_FAILED,
CY_ERROR_INVALID_PARAMETER,
CY_ERROR_REQUEST_FAILED,
CY_ERROR_DOWNLOAD_FAILED,
CY_ERROR_FIRMWARE_INVALID_SIGNATURE,
CY_ERROR_INVALID_FIRMWARE,
CY_ERROR_DEVICE_NOT_FOUND,
CY_ERROR_IO_TIMEOUT,
CY_ERROR_PIPE_HALTED,
CY_ERROR_BUFFER_OVERFLOW,
CY_ERROR_INVALID_HANDLE,
CY_ERROR_ALLOCATION_FAILED,
CY_ERROR_I2C_DEVICE_BUSY,
CY_ERROR_I2C_NAK_ERROR,
CY_ERROR_I2C_ARBITRATION_ERROR,
CY_ERROR_I2C_BUS_ERROR,
CY_ERROR_I2C_BUS_BUSY,
CY_ERROR_I2C_STOP_BIT_SET,
CY_ERROR_STATUS_MONITOR_EXIST
} CY_RETURN_STATUS;
typedef struct _CY_I2C_CONFIG{
UINT32 frequency;
UINT8 slaveAddress;
BOOL isMaster;
BOOL isClockStretch;
} CY_I2C_CONFIG,*PCY_I2C_CONFIG;
typedef struct _CY_I2C_DATA_CONFIG
{
UCHAR slaveAddress;
BOOL isStopBit;
BOOL isNakBit;
} CY_I2C_DATA_CONFIG, *PCY_I2C_DATA_CONFIG;
typedef enum _CY_SPI_PROTOCOL {
CY_SPI_MOTOROLA = 0,
CY_SPI_TI,
CY_SPI_NS
} CY_SPI_PROTOCOL;
typedef struct _CY_SPI_CONFIG
{
UINT32 frequency;
UCHAR dataWidth;
CY_SPI_PROTOCOL protocol ;
BOOL isMsbFirst;
BOOL isMaster;
BOOL isContinuousMode;
BOOL isSelectPrecede;
BOOL isCpha;
BOOL isCpol;
}CY_SPI_CONFIG,*PCY_SPI_CONFIG;
typedef enum _CY_UART_BAUD_RATE
{
CY_UART_BAUD_300 = 300,
CY_UART_BAUD_600 = 600,
CY_UART_BAUD_1200 = 1200,
CY_UART_BAUD_2400 = 2400,
CY_UART_BAUD_4800 = 4800,
CY_UART_BAUD_9600 = 9600,
CY_UART_BAUD_14400 = 14400,
CY_UART_BAUD_19200 = 19200,
CY_UART_BAUD_38400 = 38400,
CY_UART_BAUD_56000 = 56000,
CY_UART_BAUD_57600 = 57600,
CY_UART_BAUD_115200 = 115200,
CY_UART_BAUD_230400 = 230400,
CY_UART_BAUD_460800 = 460800,
CY_UART_BAUD_921600 = 921600,
CY_UART_BAUD_1000000 = 1000000,
CY_UART_BAUD_3000000 = 3000000,
}CY_UART_BAUD_RATE;
typedef enum _CY_UART_PARITY_MODE {
CY_DATA_PARITY_DISABLE = 0,
CY_DATA_PARITY_ODD,
CY_DATA_PARITY_EVEN,
CY_DATA_PARITY_MARK,
CY_DATA_PARITY_SPACE
} CY_UART_PARITY_MODE;
typedef enum _CY_UART_STOP_BIT {
CY_UART_ONE_STOP_BIT = 1,
CY_UART_TWO_STOP_BIT
} CY_UART_STOP_BIT;
typedef enum _CY_FLOW_CONTROL_MODES {
CY_UART_FLOW_CONTROL_DISABLE = 0,
CY_UART_FLOW_CONTROL_DSR,
CY_UART_FLOW_CONTROL_RTS_CTS,
CY_UART_FLOW_CONTROL_ALL
} CY_FLOW_CONTROL_MODES;
typedef struct _CY_UART_CONFIG {
CY_UART_BAUD_RATE baudRate;
UINT8 dataWidth;
CY_UART_STOP_BIT stopBits;
CY_UART_PARITY_MODE parityMode;
BOOL isDropOnRxErrors;
} CY_UART_CONFIG,*PCY_UART_CONFIG;
typedef enum _CY_CALLBACK_EVENTS {
CY_UART_CTS_BIT = 0x01,
CY_UART_DSR_BIT = 0x02,
CY_UART_BREAK_BIT = 0x04,
CY_UART_RING_SIGNAL_BIT = 0x08,
CY_UART_FRAME_ERROR_BIT = 0x10,
CY_UART_PARITY_ERROR_BIT = 0x20,
CY_UART_DATA_OVERRUN_BIT = 0x40,
CY_UART_DCD_BIT = 0x100,
CY_SPI_TX_UNDERFLOW_BIT = 0x200,
CY_SPI_BUS_ERROR_BIT = 0x400,
CY_ERROR_EVENT_FAILED_BIT = 0x800
} CY_CALLBACK_EVENTS;
CY_RETURN_STATUS CyLibraryInit ();
CY_RETURN_STATUS CyLibraryExit ();
CY_RETURN_STATUS CyGetListofDevices (
UINT8* numDevices
);
CY_RETURN_STATUS CyGetDeviceInfo(
UINT8 deviceNumber,
CY_DEVICE_INFO *deviceInfo
);
CY_RETURN_STATUS CyGetDeviceInfoVidPid (
CY_VID_PID vidPid,
UINT8 *deviceIdList,
CY_DEVICE_INFO *deviceInfoList,
UINT8 *deviceCount,
UINT8 infoListLength
);
CY_RETURN_STATUS CyOpen (
UINT8 deviceNumber,
UINT8 interfaceNum,
CY_HANDLE *handle
);
CY_RETURN_STATUS CyClose (
CY_HANDLE handle
);
CY_RETURN_STATUS CyCyclePort (
CY_HANDLE handle
);
CY_RETURN_STATUS CySetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 value
);
CY_RETURN_STATUS CyGetGpioValue (
CY_HANDLE handle,
UINT8 gpioNumber,
UINT8 *value
);
CY_RETURN_STATUS CySetEventNotification(
CY_HANDLE handle,
CY_EVENT_NOTIFICATION_CB_FN notificationCbFn
);
CY_RETURN_STATUS CyAbortEventNotification(
CY_HANDLE handle
);
CY_RETURN_STATUS CyGetLibraryVersion (
CY_HANDLE handle,
PCY_LIBRARY_VERSION version
);
CY_RETURN_STATUS CyGetFirmwareVersion (
CY_HANDLE handle,
PCY_FIRMWARE_VERSION firmwareVersion
);
CY_RETURN_STATUS CyResetDevice (
CY_HANDLE handle
);
CY_RETURN_STATUS CyProgUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *progBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyReadUserFlash (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 flashAddress,
UINT32 timeout
);
CY_RETURN_STATUS CyGetSignature (
CY_HANDLE handle,
UCHAR *pSignature
);
CY_RETURN_STATUS CyGetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CySetUartConfig (
CY_HANDLE handle,
CY_UART_CONFIG *uartConfig
);
CY_RETURN_STATUS CyUartRead (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyUartSetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES mode
);
CY_RETURN_STATUS CyUartGetHwFlowControl(
CY_HANDLE handle,
CY_FLOW_CONTROL_MODES *mode
);
CY_RETURN_STATUS CyUartSetRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearRts(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartClearDtr(
CY_HANDLE handle
);
CY_RETURN_STATUS CyUartSetBreak(
CY_HANDLE handle,
UINT16 timeout
);
CY_RETURN_STATUS CyGetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CySetI2cConfig (
CY_HANDLE handle,
CY_I2C_CONFIG *i2cConfig
);
CY_RETURN_STATUS CyI2cRead (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cWrite (
CY_HANDLE handle,
CY_I2C_DATA_CONFIG *dataConfig,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyI2cReset(
CY_HANDLE handle,
BOOL resetMode
);
CY_RETURN_STATUS CyGetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySetSpiConfig (
CY_HANDLE handle,
CY_SPI_CONFIG *spiConfig
);
CY_RETURN_STATUS CySpiReadWrite (
CY_HANDLE handle,
CY_DATA_BUFFER* readBuffer,
CY_DATA_BUFFER* writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagEnable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagDisable (
CY_HANDLE handle
);
CY_RETURN_STATUS CyJtagWrite (
CY_HANDLE handle,
CY_DATA_BUFFER *writeBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyJtagRead (
CY_HANDLE handle,
CY_DATA_BUFFER *readBuffer,
UINT32 timeout
);
CY_RETURN_STATUS CyPhdcClrFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcSetFeature (
CY_HANDLE handle
);
CY_RETURN_STATUS CyPhdcGetStatus (
CY_HANDLE handle,
UINT16 *dataStatus
);
"""
| 25.882353
| 70
| 0.71095
| 1,210
| 9,680
| 5.194215
| 0.222314
| 0.058552
| 0.102466
| 0.078918
| 0.283055
| 0.258552
| 0.181543
| 0.16245
| 0.155768
| 0.135879
| 0
| 0.044482
| 0.208058
| 9,680
| 373
| 71
| 25.951743
| 0.775372
| 0.006921
| 0
| 0.33795
| 0
| 0
| 0.990889
| 0.152749
| 0
| 0
| 0.006432
| 0
| 0
| 1
| 0
| false
| 0
| 0.00277
| 0
| 0.00277
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
917e0cc4efaf369d4d17aeaeb0fc5c964a039793
| 760
|
py
|
Python
|
slender/tests/list/test_keep_if.py
|
torokmark/slender
|
3bf815e22f7802ba48706f31ba608cf609e23e68
|
[
"Apache-2.0"
] | 1
|
2020-01-10T21:51:46.000Z
|
2020-01-10T21:51:46.000Z
|
slender/tests/list/test_keep_if.py
|
torokmark/slender
|
3bf815e22f7802ba48706f31ba608cf609e23e68
|
[
"Apache-2.0"
] | null | null | null |
slender/tests/list/test_keep_if.py
|
torokmark/slender
|
3bf815e22f7802ba48706f31ba608cf609e23e68
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from expects import expect, equal, raise_error
from slender import List
class TestKeepIf(TestCase):
def test_keep_if_if_func_is_none(self):
e = List([1, 2, 3, 4, 5])
expect(e.keep_if(None).to_list()).to(equal([1, 2, 3, 4, 5]))
def test_keep_if_if_func_is_valid(self):
e = List([1, 2, 3, 4, 5])
expect(e.keep_if(lambda item: item > 3).to_list()).to(equal([4, 5]))
def test_keep_if_if_func_is_invalid_for_all_items(self):
e = List([1, 2, 3, 4, 5])
expect(e.keep_if(lambda item: item > 6).to_list()).to(equal([]))
def test_keep_if_if_func_is_different(self):
e = List([1, 2, 3, 4])
expect(lambda: e.keep_if('...')).to(raise_error(TypeError))
| 28.148148
| 76
| 0.628947
| 133
| 760
| 3.323308
| 0.285714
| 0.108597
| 0.033937
| 0.045249
| 0.4819
| 0.475113
| 0.475113
| 0.350679
| 0.350679
| 0.246606
| 0
| 0.046667
| 0.210526
| 760
| 26
| 77
| 29.230769
| 0.69
| 0
| 0
| 0.1875
| 0
| 0
| 0.003968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.1875
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
9192d6d1ce77aea0159f3db895468368ec72c08a
| 592
|
py
|
Python
|
setup.py
|
avryhof/ambient_api
|
08194b5d8626801f2c2c7369adacb15eace54802
|
[
"MIT"
] | 20
|
2018-12-24T15:40:49.000Z
|
2022-01-10T18:58:41.000Z
|
setup.py
|
avryhof/ambient_api
|
08194b5d8626801f2c2c7369adacb15eace54802
|
[
"MIT"
] | 10
|
2018-08-17T02:01:45.000Z
|
2021-01-08T23:34:59.000Z
|
setup.py
|
avryhof/ambient_api
|
08194b5d8626801f2c2c7369adacb15eace54802
|
[
"MIT"
] | 14
|
2018-06-13T23:40:12.000Z
|
2022-01-05T06:34:13.000Z
|
from setuptools import setup
setup(
name="ambient_api",
version="1.5.6",
packages=["ambient_api"],
url="https://github.com/avryhof/ambient_api",
license="MIT",
author="Amos Vryhof",
author_email="[email protected]",
description="A Python class for accessing the Ambient Weather API.",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
install_requires=["requests", "urllib3"],
)
| 29.6
| 72
| 0.640203
| 64
| 592
| 5.84375
| 0.765625
| 0.080214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012903
| 0.214527
| 592
| 19
| 73
| 31.157895
| 0.791398
| 0
| 0
| 0
| 0
| 0
| 0.535473
| 0.038851
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
9192df0712738e90f6f197873c3a465c79101722
| 585
|
py
|
Python
|
tests/llvm/static/test_main_is_found/test_main_is_found.py
|
ganeshutah/FPChecker
|
53a471429762ace13f69733cb2f8b7227fc15b9f
|
[
"Apache-2.0"
] | 19
|
2019-09-28T16:15:45.000Z
|
2022-02-15T15:11:28.000Z
|
tests/llvm/static/test_main_is_found/test_main_is_found.py
|
tanmaytirpankar/FPChecker
|
d3fe4bd9489c5705df58a67dbbc388ac1ebf56bf
|
[
"Apache-2.0"
] | 16
|
2020-02-01T18:43:00.000Z
|
2021-12-22T14:47:39.000Z
|
tests/llvm/static/test_main_is_found/test_main_is_found.py
|
tanmaytirpankar/FPChecker
|
d3fe4bd9489c5705df58a67dbbc388ac1ebf56bf
|
[
"Apache-2.0"
] | 5
|
2020-07-27T18:15:36.000Z
|
2021-11-01T18:43:34.000Z
|
#!/usr/bin/env python
import subprocess
import os
def setup_module(module):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(THIS_DIR)
def teardown_module(module):
cmd = ["make clean"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
def test_1():
cmd = ["make"]
cmdOutput = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
passed = False
for l in cmdOutput.decode('utf-8').split("\n"):
if "#FPCHECKER: main() found" in l:
passed = True
assert passed == True
| 22.5
| 82
| 0.666667
| 79
| 585
| 4.797468
| 0.56962
| 0.063325
| 0.126649
| 0.158311
| 0.337731
| 0.337731
| 0.337731
| 0.337731
| 0.337731
| 0.337731
| 0
| 0.004274
| 0.2
| 585
| 25
| 83
| 23.4
| 0.805556
| 0.034188
| 0
| 0.125
| 0
| 0
| 0.079929
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 1
| 0.1875
| false
| 0.1875
| 0.125
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
91936b7f0195e57ee35ddf84cdb73c2bef559977
| 745
|
py
|
Python
|
Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py
|
Zhenye-Na/LxxxCode
|
afd79d790d0a7495d75e6650f80adaa99bd0ff07
|
[
"MIT"
] | 12
|
2019-05-04T04:21:27.000Z
|
2022-03-02T07:06:57.000Z
|
Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py
|
Zhenye-Na/LxxxCode
|
afd79d790d0a7495d75e6650f80adaa99bd0ff07
|
[
"MIT"
] | 1
|
2019-07-24T18:43:53.000Z
|
2019-07-24T18:43:53.000Z
|
Dynamic_Programming/1259.Integer Replacement/Solution_BFS.py
|
Zhenye-Na/LxxxCode
|
afd79d790d0a7495d75e6650f80adaa99bd0ff07
|
[
"MIT"
] | 10
|
2019-07-01T04:03:04.000Z
|
2022-03-09T03:57:37.000Z
|
from collections import deque
class Solution:
"""
@param n: a positive integer
@return: the minimum number of replacements
"""
def integerReplacement(self, n):
# Write your code here
steps = 0
if n == 1:
return steps
queue = deque([n])
while queue:
size = len(queue)
print(queue, steps)
for _ in range(size):
num = queue.popleft()
if num == 1:
return steps
if num % 2 == 0:
queue.append(num // 2)
else:
queue.append(num + 1)
queue.append(num - 1)
steps += 1
return 0
| 23.28125
| 47
| 0.436242
| 76
| 745
| 4.263158
| 0.539474
| 0.064815
| 0.12963
| 0.092593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02584
| 0.480537
| 745
| 31
| 48
| 24.032258
| 0.81137
| 0.127517
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.285714
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
9197f982af32fc988794515b093dd5bf984c98a5
| 4,132
|
py
|
Python
|
src/biota_models/vegetation/model/constants_json_create.py
|
Deltares/NBSDynamics
|
4710da529d85b588ea249f6e2b4f4cac132bb34f
|
[
"MIT"
] | 2
|
2022-01-14T05:02:04.000Z
|
2022-03-02T10:42:59.000Z
|
src/biota_models/vegetation/model/constants_json_create.py
|
Deltares/NBSDynamics
|
4710da529d85b588ea249f6e2b4f4cac132bb34f
|
[
"MIT"
] | 35
|
2021-11-01T08:59:02.000Z
|
2021-11-19T16:47:17.000Z
|
src/biota_models/vegetation/model/constants_json_create.py
|
Deltares/NBSDynamics
|
4710da529d85b588ea249f6e2b4f4cac132bb34f
|
[
"MIT"
] | 1
|
2022-03-16T07:11:00.000Z
|
2022-03-16T07:11:00.000Z
|
import json
schema = {
"Spartina": {
"ColStart": "2000-04-01",
"ColEnd": "2000-05-31",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.05,
"initial shoot length": 0.015,
"initial diameter": 0.003,
"start growth period": "2000-04-01",
"end growth period": "2000-10-31",
"start winter period": "2000-11-30",
"maximum plant height": [0.8, 1.3],
"maximum diameter": [0.003, 0.005],
"maximum root length": [0.2, 1],
"maximum years in LifeStage": [1, 19],
"numStem": [700, 700], # 3.5. number of stems per m2
"iniCol_frac": 0.6, # 3.6. initial colonization fraction (0-1)
"Cd": [1.1, 1.15], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.4, 0.4], # 3.11. flooding mortality threshold
"floMort_slope": [0.25, 0.25], # 3.12. flooding mortality slope
"vel_thres": [0.15, 0.25], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.4, 0.4], # 3.15 max height during winter time
},
"Salicornia": {
"ColStart": "2000-02-15",
"ColEnd": "2000-04-30",
"random": 20,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 1,
"Number LifeStages": 1,
"initial root length": 0.15,
"initial shoot length": 0.05,
"initial diameter": 0.01,
"start growth period": "2000-02-15",
"end growth period": "2000-10-15",
"start winter period": "2000-11-01",
"maximum plant height": [0.4, 0],
"maximum diameter": [0.015, 0],
"maximum root length": [0.05, 0],
"maximum years in LifeStage": [1, 0],
"numStem": [190, 0], # 3.5. number of stems per m2
"iniCol_frac": 0.2, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0], # 3.7. drag coefficient
"desMort_thres": [400, 1], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 1], # 3.10. dessication mortality slope
"floMort_thres": [0.5, 1], # 3.11. flooding mortality threshold
"floMort_slope": [0.12, 1], # 3.12. flooding mortality slope
"vel_thres": [0.15, 1], # 3.13. flow velocity threshold
"vel_slope": [3, 1], # 3.14. flow velocity slope
"maxH_winter": [0.0, 0.0], # 3.15 max height during winter time
},
"Puccinellia": {
"ColStart": "2000-03-01",
"ColEnd": "2000-04-30",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.02,
"initial shoot length": 0.05,
"initial diameter": 0.004,
"start growth period": "2000-03-01",
"end growth period": "2000-11-15",
"start winter period": "2000-11-30",
"maximum plant height": [0.2, 0.35],
"maximum diameter": [0.004, 0.005],
"maximum root length": [0.15, 0.15],
"maximum years in LifeStage": [1, 19],
"numStem": [6500, 6500], # 3.5. number of stems per m2
"iniCol_frac": 0.3, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0.7], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.35, 0.35], # 3.11. flooding mortality threshold
"floMort_slope": [0.4, 0.4], # 3.12. flooding mortality slope
"vel_thres": [0.25, 0.5], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.2, 0.2], # 3.15 max height during winter time
},
}
with open("constants_veg.json", "w") as write_file:
json.dump(schema, write_file, indent=4)
| 43.494737
| 76
| 0.547193
| 580
| 4,132
| 3.841379
| 0.181034
| 0.010772
| 0.010772
| 0.007181
| 0.766158
| 0.706912
| 0.675045
| 0.593357
| 0.426391
| 0.373429
| 0
| 0.153587
| 0.281462
| 4,132
| 94
| 77
| 43.957447
| 0.596834
| 0.232091
| 0
| 0.282609
| 0
| 0
| 0.407702
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.01087
| 0
| 0.01087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91993f87e0ff04f74f7a6f31b278e5b76bf7a8ba
| 1,376
|
py
|
Python
|
Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py
|
GunnerJnr/_CodeInstitute
|
efba0984a3dc71558eef97724c85e274a712798c
|
[
"MIT"
] | 4
|
2017-10-10T14:00:40.000Z
|
2021-01-27T14:08:26.000Z
|
Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py
|
GunnerJnr/_CodeInstitute
|
efba0984a3dc71558eef97724c85e274a712798c
|
[
"MIT"
] | 115
|
2019-10-24T11:18:33.000Z
|
2022-03-11T23:15:42.000Z
|
Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py
|
GunnerJnr/_CodeInstitute
|
efba0984a3dc71558eef97724c85e274a712798c
|
[
"MIT"
] | 5
|
2017-09-22T21:42:39.000Z
|
2020-02-07T02:18:11.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser, UserManager
from django.db import models
from django.utils import timezone
# Create your models here.
# Create our new user class
class AccountUserManager(UserManager):
def _create_user(self, username, email, password, is_staff, is_supervisor, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
:param username:
:param email:
:param password:
:param is_staff:
:param is_supervisor:
:param extra_fields:
:return:
"""
now = timezone.now()
if not email:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
user = self.model(username=email, email=email,
is_staff=is_staff, is_active=True,
is_supervisor=is_supervisor,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self.db)
return user
class User(AbstractUser):
# now that we've abstracted this class we can add any
# number of custom attribute to our user class
# in later units we'll be adding things like payment details!
object = AccountUserManager()
| 32
| 95
| 0.641715
| 167
| 1,376
| 5.155689
| 0.497006
| 0.03252
| 0.031359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001012
| 0.281977
| 1,376
| 42
| 96
| 32.761905
| 0.870445
| 0.303052
| 0
| 0
| 0
| 0
| 0.034325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0.105263
| 0.210526
| 0
| 0.473684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
919c72f34a550015e3cadb40b602759ce1ee194d
| 14,482
|
py
|
Python
|
benchmark/python/ffi/benchmark_ffi.py
|
grygielski/incubator-mxnet
|
45952e21a35e32a04b7607b121085973369a42db
|
[
"BSL-1.0",
"Apache-2.0"
] | 211
|
2016-06-06T08:32:36.000Z
|
2021-07-03T16:50:16.000Z
|
benchmark/python/ffi/benchmark_ffi.py
|
grygielski/incubator-mxnet
|
45952e21a35e32a04b7607b121085973369a42db
|
[
"BSL-1.0",
"Apache-2.0"
] | 42
|
2017-01-05T02:45:13.000Z
|
2020-08-11T23:45:27.000Z
|
benchmark/python/ffi/benchmark_ffi.py
|
grygielski/incubator-mxnet
|
45952e21a35e32a04b7607b121085973369a42db
|
[
"BSL-1.0",
"Apache-2.0"
] | 58
|
2016-10-27T07:37:08.000Z
|
2021-07-03T16:50:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import timeit
import itertools
import argparse
import os
class OpArgMngr(object):
"""Operator argument manager for storing operator workloads."""
args = {}
@staticmethod
def add_workload(funcname, *args, **kwargs):
if "_specifier" not in kwargs:
_specifier = funcname
else:
_specifier = kwargs["_specififer"]
del kwargs["_specififer"]
if _specifier in OpArgMngr.args:
raise ValueError("duplicate {}".format(_specifier))
OpArgMngr.args[_specifier] = {'args': args, 'kwargs': kwargs, 'funcname': funcname}
def generate_workloads():
array_pool = {}
shapes = []
for ndim in range(4):
shapes.extend(list(itertools.product(range(4), repeat=ndim)))
for shape in shapes:
name = 'x'.join(str(i) for i in shape)
if name in array_pool:
raise ValueError("duplicate array {}".format(name))
array_pool[name] = dnp.ones(shape)
return array_pool
def prepare_workloads():
pool = generate_workloads()
OpArgMngr.add_workload("zeros", (2, 2))
OpArgMngr.add_workload("full", (2, 2), 10)
OpArgMngr.add_workload("identity", 3)
OpArgMngr.add_workload("ones", (2, 2))
OpArgMngr.add_workload("einsum", "ii", pool['2x2'], optimize=False)
OpArgMngr.add_workload("unique", pool['1'], return_index=True, return_inverse=True, return_counts=True, axis=-1)
OpArgMngr.add_workload("dstack", (pool['2x1'], pool['2x1'], pool['2x1'], pool['2x1']))
OpArgMngr.add_workload("polyval", dnp.arange(10), pool['2x2'])
OpArgMngr.add_workload("ediff1d", pool['2x2'], pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("nan_to_num", pool['2x2'])
OpArgMngr.add_workload("tri", 2, 3, 4)
OpArgMngr.add_workload("tensordot", pool['2x2'], pool['2x2'], ((1, 0), (0, 1)))
OpArgMngr.add_workload("cumsum", pool['3x2'], axis=0, out=pool['3x2'])
OpArgMngr.add_workload("random.shuffle", pool['3'])
OpArgMngr.add_workload("equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("not_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("greater_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("maximum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("minimum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("sum", pool['2x2'], axis=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("std", pool['2x2'], axis=0, ddof=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("var", pool['2x2'], axis=0, ddof=1, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("average", pool['2x2'], weights=pool['2'], axis=1, returned=True)
OpArgMngr.add_workload("histogram", pool['2x2'], bins=10, range=(0.0, 10.0))
OpArgMngr.add_workload("add", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("cross", pool['2'], pool['2'])
OpArgMngr.add_workload("linalg.eig", pool['3x3'])
OpArgMngr.add_workload("linalg.eigh", pool['3x3'])
OpArgMngr.add_workload("linalg.det", pool['3x3'])
OpArgMngr.add_workload("linalg.slogdet", pool['3x3'])
OpArgMngr.add_workload("linalg.matrix_rank", pool['3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.svd", pool['3x3'])
OpArgMngr.add_workload("linalg.cholesky", pool['1x1'])
OpArgMngr.add_workload("linalg.qr", pool['3x3'])
OpArgMngr.add_workload("linalg.lstsq", pool['2x1'], pool['2'], rcond=None)
OpArgMngr.add_workload("linalg.eigvals", pool['1x1'])
OpArgMngr.add_workload("linalg.eigvalsh", pool['1x1'], UPLO='L')
OpArgMngr.add_workload("linalg.inv", pool['1x1'])
OpArgMngr.add_workload("linalg.pinv", pool['2x3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.solve", pool['1x1'], pool['1'])
OpArgMngr.add_workload("linalg.tensorinv", pool['1x1'], ind=2)
OpArgMngr.add_workload("linalg.norm", pool['3x3'])
OpArgMngr.add_workload("linalg.tensorsolve", pool['1x1x1'], pool['1x1x1'], (2, 0, 1))
OpArgMngr.add_workload("tile", pool['2x2'], 1)
OpArgMngr.add_workload("trace", pool['2x2'])
OpArgMngr.add_workload("transpose", pool['2x2'])
OpArgMngr.add_workload("split", pool['3x3'], (0, 1, 2), axis=1)
OpArgMngr.add_workload("vstack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("argmax", pool['3x2'], axis=-1)
OpArgMngr.add_workload("argmin", pool['3x2'], axis=-1)
OpArgMngr.add_workload("atleast_1d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_2d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_3d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("argsort", pool['3x2'], axis=-1)
OpArgMngr.add_workload("sort", pool['3x2'], axis=-1)
OpArgMngr.add_workload("indices", dimensions=(1, 2, 3))
OpArgMngr.add_workload("subtract", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("multiply", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mod", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("remainder", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("true_divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("power", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("lcm", pool['2x2'].astype('int32'), pool['2x2'].astype('int32'))
OpArgMngr.add_workload("diff", pool['2x2'], n=1, axis=-1)
OpArgMngr.add_workload("inner", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("random.multinomial", n=2, pvals=[1/6.]*6, size=(2,2))
OpArgMngr.add_workload("random.rand", 3, 2)
OpArgMngr.add_workload("random.randn", 2, 2)
OpArgMngr.add_workload("nonzero", pool['2x2'])
OpArgMngr.add_workload("tril", pool['2x2'], k=0)
OpArgMngr.add_workload("random.choice", pool['2'], size=(2, 2))
OpArgMngr.add_workload("take", pool['2'], dnp.array([1,0], dtype='int64'))
OpArgMngr.add_workload("clip", pool['2x2'], 0, 1)
OpArgMngr.add_workload("expand_dims", pool['2x2'], axis=0)
OpArgMngr.add_workload("broadcast_to", pool['2x2'], (2, 2, 2))
OpArgMngr.add_workload("full_like", pool['2x2'], 2)
OpArgMngr.add_workload("zeros_like", pool['2x2'])
OpArgMngr.add_workload("ones_like", pool['2x2'])
OpArgMngr.add_workload("bitwise_and", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_xor", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_or", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("copysign", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("arctan2", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("hypot", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("ldexp", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("logical_and", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_or", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_xor", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("random.uniform", low=0, high=1, size=1)
OpArgMngr.add_workload("random.exponential", scale=2, size=(2,2))
OpArgMngr.add_workload("random.rayleigh", scale=2, size=(2,2))
OpArgMngr.add_workload("random.weibull", a=2, size=(2,2))
OpArgMngr.add_workload("random.pareto", a=2, size=(2,2))
OpArgMngr.add_workload("random.power", a=2, size=(2,2))
OpArgMngr.add_workload("random.logistic", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("random.gumbel", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("where", pool['2x3'], pool['2x3'], pool['2x1'])
OpArgMngr.add_workload("may_share_memory", pool['2x3'][:0], pool['2x3'][:1])
OpArgMngr.add_workload('squeeze', pool['2x2'], axis=None)
OpArgMngr.add_workload("pad", pool['2x2'], pad_width=((1,2),(1,2)), mode="constant")
OpArgMngr.add_workload("prod", pool['2x2'], axis=1, dtype="float64", keepdims=False)
OpArgMngr.add_workload("around", pool['2x2'], decimals=0)
OpArgMngr.add_workload("round", pool['2x2'], decimals=1)
OpArgMngr.add_workload("repeat", pool['2x2'], repeats=1, axis=None)
OpArgMngr.add_workload("diagflat", pool['2x2'], k=1)
OpArgMngr.add_workload("diag", pool['2x2'], k=1)
OpArgMngr.add_workload("diagonal", pool['2x2x2'], offset=-1, axis1=0, axis2=1)
OpArgMngr.add_workload("diag_indices_from", pool['2x2'])
OpArgMngr.add_workload("bincount", dnp.arange(3, dtype=int), pool['3'], minlength=4)
OpArgMngr.add_workload("percentile", pool['2x2x2'], 80, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("quantile", pool['2x2x2'], 0.8, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("all", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("any", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("roll", pool["2x2"], 1, axis=0)
OpArgMngr.add_workload("rot90", pool["2x2"], 2)
OpArgMngr.add_workload("column_stack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("hstack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("triu", pool['3x3'])
OpArgMngr.add_workload("array_split", pool['2x2'], 2, axis=1)
OpArgMngr.add_workload("vsplit", pool['2x2'], 2)
OpArgMngr.add_workload("hsplit", pool['2x2'], 2)
OpArgMngr.add_workload("dsplit", pool['2x2x2'], 2)
OpArgMngr.add_workload("arange", 10)
OpArgMngr.add_workload("concatenate", (pool['1x2'], pool['1x2'], pool['1x2']), axis=0)
OpArgMngr.add_workload("append", pool['2x2'], pool['1x2'], axis=0)
OpArgMngr.add_workload("insert", pool['3x2'], 1, pool['1x1'], axis=0)
OpArgMngr.add_workload("delete", pool['3x2'], 1, axis=0)
OpArgMngr.add_workload("blackman", 12)
OpArgMngr.add_workload("eye", 5)
OpArgMngr.add_workload("hamming", 12)
OpArgMngr.add_workload("hanning", 12)
OpArgMngr.add_workload("linspace", 0, 10, 8, endpoint=False)
OpArgMngr.add_workload("logspace", 2.0, 3.0, num=4, base=2.0, dtype=onp.float32)
OpArgMngr.add_workload("matmul", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mean", pool['2x2'], axis=0, keepdims=True)
OpArgMngr.add_workload("random.gamma", 1, size=(2, 3))
OpArgMngr.add_workload("random.normal", 1, size=(2, 3))
OpArgMngr.add_workload("max", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("min", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("amax", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("amin", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
unary_ops = ['negative', 'reciprocal', 'abs', 'sign', 'rint', 'ceil', 'floor',
'bitwise_not', 'trunc', 'fix', 'square', 'sqrt', 'cbrt', 'exp',
'log', 'log10', 'log2', 'log1p', 'expm1', 'logical_not', 'isnan',
'isinf', 'isposinf', 'isneginf', 'isfinite', 'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan', 'degrees', 'radians', 'sinh', 'cosh',
'tanh', 'arcsinh', 'arccosh', 'arctanh'] # 'rad2deg', 'deg2rad' cannot run without tvm
for unary_op in unary_ops:
if unary_op == "bitwise_not":
OpArgMngr.add_workload(unary_op, dnp.ones((2, 2), dtype=int))
else:
OpArgMngr.add_workload(unary_op, pool['2x2'])
def benchmark_helper(f, *args, **kwargs):
number = 10000
return timeit.timeit(lambda: f(*args, **kwargs), number=number) / number
def get_op(module, funcname):
funcname = funcname.split(".")
for fname in funcname:
module = getattr(module, fname)
return module
def run_benchmark(packages):
results = {}
for (k, v) in OpArgMngr.args.items():
result = {}
for (name, package) in packages.items():
print('{}.{} running...'.format(name, k))
op = get_op(package["module"], v["funcname"])
args = [package["data"](arg) for arg in v["args"]]
kwargs = {k: package["data"](v) for (k, v) in v["kwargs"].items()}
benchmark = benchmark_helper(op, *args, **kwargs)
result[name] = benchmark
results[k] = result
return results
def show_results(results):
print("{:>24}{:>24}{:>24}".format("name", "package", "time(us)"))
for (specifier, d) in results.items():
for (k, v) in d.items():
print("{:>24}{:>24}{:>24}".format(specifier, k, v * 10 ** 6))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ffi_type')
parsed = parser.parse_args()
if parsed.ffi_type == "cython":
os.environ['MXNET_ENABLE_CYTHON'] = '1'
os.environ['MXNET_ENFORCE_CYTHON'] = '1'
elif parsed.ffi_type == "ctypes":
os.environ['MXNET_ENABLE_CYTHON'] = '0'
else:
raise ValueError("unknown ffi_type {}",format(parsed.ffi_type))
os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
import mxnet as mx
import numpy as onp
from mxnet import np as dnp
mx.npx.set_np(dtype=False)
packages = {
"onp": {
"module": onp,
"data": lambda arr: arr.asnumpy() if isinstance(arr, dnp.ndarray) else arr
},
"dnp": {
"module": dnp,
"data": lambda arr: arr
}
}
prepare_workloads()
results = run_benchmark(packages)
show_results(results)
| 51.90681
| 116
| 0.646596
| 1,922
| 14,482
| 4.74974
| 0.221124
| 0.177128
| 0.31986
| 0.072845
| 0.459634
| 0.367729
| 0.288531
| 0.193121
| 0.132873
| 0.073173
| 0
| 0.048544
| 0.160751
| 14,482
| 278
| 117
| 52.093525
| 0.702567
| 0.059039
| 0
| 0.028807
| 0
| 0
| 0.172216
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028807
| false
| 0
| 0.028807
| 0
| 0.082305
| 0.012346
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91a824d6a95f0e9a4a572ff289971a58109b3c3c
| 3,887
|
py
|
Python
|
test/present.py
|
jchampio/apache-websocket
|
18ad4ae2fc99381b8d75785f492a479f789b322b
|
[
"Apache-2.0"
] | 8
|
2015-09-10T21:49:25.000Z
|
2022-02-02T04:39:00.000Z
|
test/present.py
|
jchampio/apache-websocket
|
18ad4ae2fc99381b8d75785f492a479f789b322b
|
[
"Apache-2.0"
] | 34
|
2015-09-10T21:40:09.000Z
|
2020-09-04T22:16:08.000Z
|
test/present.py
|
jchampio/apache-websocket
|
18ad4ae2fc99381b8d75785f492a479f789b322b
|
[
"Apache-2.0"
] | 5
|
2016-01-22T05:16:54.000Z
|
2017-10-18T12:28:02.000Z
|
#! /usr/bin/env python
#
# Presents the results of an Autobahn TestSuite run in TAP format.
#
# Copyright 2015 Jacob Champion
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import StrictVersion
import json
import os.path
import sys
import textwrap
import yamlish
def filter_report(report):
"""Filters a test report dict down to only the interesting keys."""
INTERESTING_KEYS = [
'behavior',
'behaviorClose',
'expected',
'received',
'expectedClose',
'remoteCloseCode'
]
return { key: report[key] for key in INTERESTING_KEYS }
def prepare_description(report):
"""Constructs a description from a test report."""
raw = report['description']
# Wrap to at most 80 characters.
wrapped = textwrap.wrap(raw, 80)
description = wrapped[0]
if len(wrapped) > 1:
# If the text is longer than one line, add an ellipsis.
description += '...'
return description
#
# MAIN
#
# Read the index.
results_dir = 'test-results'
with open(os.path.join(results_dir, 'index.json'), 'r') as index_file:
index = json.load(index_file)['AutobahnPython']
# Sort the tests by numeric ID so we print them in a sane order.
test_ids = list(index.keys())
test_ids.sort(key=StrictVersion)
# Print the TAP header.
print('TAP version 13')
print('1..{0!s}'.format(len(test_ids)))
count = 0
skipped_count = 0
failed_count = 0
for test_id in test_ids:
count += 1
passed = True
skipped = False
report = None
result = index[test_id]
# Try to get additional information from this test's report file.
try:
path = os.path.join(results_dir, result['reportfile'])
with open(path, 'r') as f:
report = json.load(f)
description = prepare_description(report)
except Exception as e:
description = '[could not load report file: {0!s}]'.format(e)
test_result = result['behavior']
close_result = result['behaviorClose']
# Interpret the result for this test.
if test_result != 'OK' and test_result != 'INFORMATIONAL':
if test_result == 'UNIMPLEMENTED':
skipped = True
else:
passed = False
elif close_result != 'OK' and close_result != 'INFORMATIONAL':
passed = False
# Print the TAP result.
print(u'{0} {1} - [{2}] {3}{4}'.format('ok' if passed else 'not ok',
count,
test_id,
description,
' # SKIP unimplemented' if skipped
else ''))
# Print a YAMLish diagnostic for failed tests.
if report and not passed:
output = filter_report(report)
diagnostic = yamlish.dumps(output)
for line in diagnostic.splitlines():
print(' ' + line)
if not passed:
failed_count += 1
if skipped:
skipped_count += 1
# Print a final result.
print('# Autobahn|TestSuite {0}'.format('PASSED' if not failed_count else 'FAILED'))
print('# total {0}'.format(count))
print('# passed {0}'.format(count - failed_count - skipped_count))
print('# skipped {0}'.format(skipped_count))
print('# failed {0}'.format(failed_count))
exit(0 if not failed_count else 1)
| 28.792593
| 84
| 0.623874
| 496
| 3,887
| 4.818548
| 0.362903
| 0.025105
| 0.010879
| 0.013389
| 0.033473
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013107
| 0.273733
| 3,887
| 134
| 85
| 29.007463
| 0.83351
| 0.289426
| 0
| 0.026316
| 0
| 0
| 0.141492
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0.105263
| 0.078947
| 0
| 0.131579
| 0.118421
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
91a977f44ca6b26789c3c66246a46fa0280ee2a7
| 1,143
|
py
|
Python
|
softwarecollections/scls/migrations/0004_other_repos_default_values.py
|
WEBZCC/softwarecollections
|
efee5c3c276033d526a0cdba504d43deff71581e
|
[
"BSD-3-Clause"
] | 39
|
2016-12-24T02:57:55.000Z
|
2022-02-15T09:29:43.000Z
|
softwarecollections/scls/migrations/0004_other_repos_default_values.py
|
WEBZCC/softwarecollections
|
efee5c3c276033d526a0cdba504d43deff71581e
|
[
"BSD-3-Clause"
] | 32
|
2016-11-21T15:05:07.000Z
|
2021-12-06T11:52:32.000Z
|
softwarecollections/scls/migrations/0004_other_repos_default_values.py
|
WEBZCC/softwarecollections
|
efee5c3c276033d526a0cdba504d43deff71581e
|
[
"BSD-3-Clause"
] | 13
|
2016-12-14T10:42:22.000Z
|
2022-01-01T20:35:15.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scls', '0003_other_repos'),
]
operations = [
migrations.AlterField(
model_name='otherrepo',
name='arch',
field=models.CharField(default='', blank=True, verbose_name='Architecture', max_length=20),
),
migrations.AlterField(
model_name='otherrepo',
name='command',
field=models.TextField(default='', blank=True, verbose_name='Command'),
),
migrations.AlterField(
model_name='otherrepo',
name='icon',
field=models.CharField(default='', blank=True, verbose_name='Icon', choices=[('centos', 'centos'), ('epel', 'epel'), ('fedora', 'fedora'), ('rhel', 'rhel')], max_length=20),
),
migrations.AlterField(
model_name='otherrepo',
name='version',
field=models.CharField(default='', blank=True, verbose_name='Distribution version', max_length=20),
),
]
| 32.657143
| 185
| 0.582677
| 107
| 1,143
| 6.056075
| 0.420561
| 0.123457
| 0.154321
| 0.179012
| 0.552469
| 0.510802
| 0.381173
| 0.381173
| 0.16358
| 0
| 0
| 0.013095
| 0.265092
| 1,143
| 34
| 186
| 33.617647
| 0.758333
| 0.018373
| 0
| 0.428571
| 0
| 0
| 0.14375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91ad7c273462430b62373174e1161a8ff1416f63
| 715
|
py
|
Python
|
atcoder/corp/codethxfes2014a_e.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | 1
|
2018-11-12T15:18:55.000Z
|
2018-11-12T15:18:55.000Z
|
atcoder/corp/codethxfes2014a_e.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
atcoder/corp/codethxfes2014a_e.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
r, c, m = map(int, input().split())
n = int(input())
op = [list(map(lambda x: int(x) - 1, input().split())) for _ in range(n)]
board = [[0 for _ in range(c)] for _ in range(r)]
for ra, rb, ca, cb in op:
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
board[j][k] += 1
cnt = 0
for i in range(r):
for j in range(c):
board[i][j] %= 4
if board[i][j] == 0:
cnt += 1
for i in range(n):
ra, rb, ca, cb = op[i]
cnti = cnt
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
if board[j][k] == 0:
cnti -= 1
elif board[j][k] == 1:
cnti += 1
if cnti == m:
print(i + 1)
| 25.535714
| 73
| 0.439161
| 129
| 715
| 2.410853
| 0.24031
| 0.22508
| 0.096463
| 0.106109
| 0.205788
| 0.205788
| 0.205788
| 0.205788
| 0.205788
| 0.205788
| 0
| 0.036036
| 0.379021
| 715
| 27
| 74
| 26.481481
| 0.664414
| 0
| 0
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.04
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91b495763107bc2ceb225b3984a8b4ffae309299
| 2,914
|
py
|
Python
|
data_converter/data_converter.py
|
jkchen2/JshBot-plugins
|
b5999fecf0df067e34673ff193dcfbf8c7e2fde2
|
[
"MIT"
] | 1
|
2021-08-09T19:28:49.000Z
|
2021-08-09T19:28:49.000Z
|
data_converter/data_converter.py
|
jkchen2/JshBot-plugins
|
b5999fecf0df067e34673ff193dcfbf8c7e2fde2
|
[
"MIT"
] | null | null | null |
data_converter/data_converter.py
|
jkchen2/JshBot-plugins
|
b5999fecf0df067e34673ff193dcfbf8c7e2fde2
|
[
"MIT"
] | 2
|
2017-07-14T00:15:54.000Z
|
2019-03-02T09:46:21.000Z
|
import discord
from jshbot import utilities, data, configurations, plugins, logger
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.0'
CBException = ConfiguredBotException('0.3 to 0.4 plugin')
@plugins.command_spawner
def get_commands(bot):
return [Command('convertdata', hidden=True, elevated_level=3)]
async def get_response(bot, context):
for guild in bot.guilds:
convert_core(bot, guild)
if 'tags.py' in bot.plugins:
convert_tags(bot, guild)
return Response("Converted.")
def convert_core(bot, guild):
if data.get(bot, 'core', None, guild_id=guild.id):
logger.warn("Guild %s (%s) already had core converted", guild.name, guild.id)
return
base_data = data.get(bot, 'base', None, guild_id=guild.id, default={})
if 'disabled' in base_data:
# TODO: Iterate through toggled commands
pass
if 'blocked' in base_data:
replacement = []
for entry in base_data['blocked']:
replacement.append(int(entry))
base_data['blocked'] = replacement
if 'muted_channels' in base_data:
replacement = []
for entry in base_data['muted_channels']:
replacement.append(int(entry))
base_data['muted_channels'] = replacement
if 'moderators' in base_data:
del base_data['moderators']
if base_data:
for key, value in base_data.items():
data.add(bot, 'core', key, value, guild_id=guild.id)
data.remove(bot, 'base', None, guild_id=guild.id)
def convert_tags(bot, guild):
if not data.get(bot, 'tags.py', 'tags', guild_id=guild.id):
logger.warn("Guild %s (%s) already had tags converted", guild.name, guild.id)
return
tags = data.get(bot, 'tags.py', 'tags', guild_id=guild.id, default={})
add_tag = bot.plugins['tags.py']._add_tag
#key,value,length,volume,name,flags,author,hits,created,last_used,last_used_by,complex,extra
for key, tag in tags.items():
to_insert = [
key, # key
tag['value'], # value
tag['length'], # length
tag['volume'], # volume
tag['name'], # name
tag['flags'], # flags
int(tag['author']), # author
tag['hits'], # hits
int(tag['created']), # created
int(tag['last_used']), # last_used
None, # last_used_by
{}, # complex
{} # extra
]
add_tag(bot, to_insert, guild.id)
data.remove(bot, 'tags.py', 'tags', guild_id=guild.id, safe=True)
| 38.853333
| 96
| 0.576527
| 351
| 2,914
| 4.652422
| 0.273504
| 0.072872
| 0.051439
| 0.060012
| 0.363748
| 0.256583
| 0.1782
| 0.147581
| 0.131047
| 0.083282
| 0
| 0.003923
| 0.300275
| 2,914
| 74
| 97
| 39.378378
| 0.796959
| 0.075841
| 0
| 0.095238
| 0
| 0
| 0.122899
| 0
| 0
| 0
| 0
| 0.013514
| 0
| 1
| 0.047619
| false
| 0.015873
| 0.063492
| 0.015873
| 0.174603
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91c2124933101c4997c3e85497e979cf423b2846
| 10,418
|
py
|
Python
|
Tests/test_ironmath.py
|
btddg28/ironpython
|
8006238c19d08db5db9bada39d765143e631059e
|
[
"Apache-2.0"
] | null | null | null |
Tests/test_ironmath.py
|
btddg28/ironpython
|
8006238c19d08db5db9bada39d765143e631059e
|
[
"Apache-2.0"
] | null | null | null |
Tests/test_ironmath.py
|
btddg28/ironpython
|
8006238c19d08db5db9bada39d765143e631059e
|
[
"Apache-2.0"
] | null | null | null |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
#
# test Microsoft.Scripting.Math
#
from iptest.assert_util import *
skiptest("win32")
from System import *
import clr
#silverlight already has this
if is_cli:
math_assembly = (1).GetType().Assembly
clr.AddReference(math_assembly)
load_iron_python_test()
import IronPythonTest
if is_net40:
from System.Numerics import BigInteger, Complex
else:
from Microsoft.Scripting.Math import BigInteger
from Microsoft.Scripting.Math import Complex64 as Complex
class myFormatProvider(IFormatProvider):
def ToString():pass
p = myFormatProvider()
def test_bigint():
AreEqual(BigInteger.Add(1,99999999999999999999999999999999999999999999999999999999999) ,BigInteger.Subtract(100000000000000000000000000000000000000000000000000000000001,1))
AreEqual(BigInteger.Multiply(400,500) , BigInteger.Divide(1000000,5))
AreEqual(BigInteger.Multiply(400,8) , BigInteger.LeftShift(400,3))
AreEqual(BigInteger.Divide(400,8) , BigInteger.RightShift(400,3))
AreEqual(BigInteger.RightShift(BigInteger.LeftShift(400,100),100) , 400)
AreEqual(BigInteger.RightShift(BigInteger.LeftShift(-12345678987654321,100),100) , -12345678987654321)
if is_net40:
AssertError(ValueError, BigInteger.RightShift, 400, -100)
AssertError(ValueError, BigInteger.LeftShift, 400, -100)
AssertError(ValueError, BigInteger.RightShift, -12345678987654321, -100)
AssertError(ValueError, BigInteger.LeftShift, -12345678987654321, -100)
else:
AreEqual(BigInteger.LeftShift(BigInteger.RightShift(400,-100),-100) , 400)
AreEqual(BigInteger.LeftShift(BigInteger.RightShift(-12345678987654321,-100),-100) , -12345678987654321)
AreEqual(BigInteger(-123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement().OnesComplement() , -123456781234567812345678123456781234567812345678123456781234567812345678)
AreEqual(BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement() , -(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 + 1 ))
Assert(BigInteger.Xor(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678,BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement()) , -1)
AreEqual(BigInteger.BitwiseAnd(0xff00ff00,BigInteger.BitwiseOr(0x00ff00ff,0xaabbaabb)) , BigInteger(0xaa00aa00))
AreEqual(BigInteger.Mod(BigInteger(-9999999999999999999999999999999999999999),1000000000000000000) , -BigInteger.Mod(9999999999999999999999999999999999999999,BigInteger(-1000000000000000000)))
AreEqual(BigInteger.ToInt64(0x7fffffffffffffff) , 9223372036854775807)
AssertError(OverflowError, BigInteger.ToInt64, 0x8000000000000000)
AreEqual(BigInteger(-0).ToBoolean(p) , False )
AreEqual(BigInteger(-1212321.3213).ToBoolean(p) , True )
AreEqual(BigInteger(1212321384892342394723947).ToBoolean(p) , True )
AreEqual(BigInteger(0).ToChar(p) , Char.MinValue)
AreEqual(BigInteger(65).ToChar(p) , IConvertible.ToChar('A', p))
AreEqual(BigInteger(0xffff).ToChar(p) , Char.MaxValue)
AssertError(OverflowError, BigInteger(-1).ToChar, p)
AreEqual(BigInteger(100).ToDouble(p) , 100.0)
AreEqual(BigInteger(BigInteger(100).ToDouble(p)).ToSingle(p) , BigInteger(100.1213123).ToFloat())
Assert(BigInteger(100) != 100.32)
AreEqual(BigInteger(100) , 100.0)
Assert( 100.32 != BigInteger(100))
AreEqual(100.0 , BigInteger(100) )
def test_big_1():
for (a, m, t,x) in [
(7, "ToSByte", SByte,2),
(8, "ToByte", Byte, 0),
(15, "ToInt16", Int16,2),
(16, "ToUInt16", UInt16,0),
(31, "ToInt32", Int32,2),
(32, "ToUInt32", UInt32,0),
(63, "ToInt64", Int64,2),
(64, "ToUInt64", UInt64,0)
]:
b = BigInteger(-x ** a )
left = getattr(b, m)(p)
right = t.MinValue
AreEqual(left, right)
b = BigInteger(2 ** a -1)
left = getattr(b, m)(p)
right = t.MaxValue
AreEqual(left, right)
b = BigInteger(0)
left = getattr(b, m)(p)
right = t.MaxValue - t.MaxValue
AreEqual(left, 0)
AssertError(OverflowError,getattr(BigInteger(2 ** a ), m),p)
AssertError(OverflowError,getattr(BigInteger(-1 - x ** a ), m),p)
def test_big_2():
for (a, m, t,x) in [
(31, "ToInt32",Int32,2),
(32, "ToUInt32",UInt32,0),
(63, "ToInt64",Int64,2),
(64, "ToUInt64",UInt64,0)
]:
b = BigInteger(-x ** a )
left = getattr(b, m)()
right = t.MinValue
AreEqual(left, right)
b = BigInteger(2 ** a -1)
left = getattr(b, m)()
right = t.MaxValue
AreEqual(left, right)
b = BigInteger(0)
left = getattr(b, m)()
right = t.MaxValue - t.MaxValue
AreEqual(left, right)
AssertError(OverflowError,getattr(BigInteger(2 ** a ), m))
AssertError(OverflowError,getattr(BigInteger(-1 - x ** a ), m))
#complex
def test_complex():
AreEqual(
Complex.Add(
Complex(BigInteger(9999), -1234),
Complex.Conjugate(Complex(9999, -1234)) ),
Complex.Multiply(BigInteger(9999), 2) )
AreEqual(
Complex.Add(
Complex(99999.99e-200, 12345.88e+100),
Complex.Negate(Complex(99999.99e-200, 12345.88e+100)) ),
Complex.Subtract(
Complex(99999.99e-200, 12345.88e+100),
Complex(99999.99e-200, 12345.88e+100) ))
AreEqual(
Complex.Divide(4+2j,2),
(2 + 1j) )
Assert(not hasattr(Complex, "Mod")) #IP 1.x had limited support for modulo which has been removed
def test_bool_misc():
if is_net40:
def is_zero(bigint):
return bigint.IsZero
else:
def is_zero(bigint):
return bigint.IsZero()
AreEqual(BigInteger(-1234).Sign, -1)
AreEqual(is_zero(BigInteger(-1234)), False)
AreEqual(BigInteger(-1234).IsNegative(), True)
AreEqual(BigInteger(-1234).IsPositive(), False)
AreEqual(BigInteger(0).Sign, 0)
AreEqual(is_zero(BigInteger(0)), True)
AreEqual(BigInteger(0).IsNegative(), False)
AreEqual(BigInteger(0).IsPositive(), False)
AreEqual(BigInteger(1234).Sign, 1)
AreEqual(is_zero(BigInteger(1234)), False)
AreEqual(BigInteger(1234).IsNegative(), False)
AreEqual(BigInteger(1234).IsPositive(), True)
def test_byte_conversions():
def CheckByteConversions(bigint, bytes):
SequencesAreEqual(bigint.ToByteArray(), bytes)
AreEqual(BigInteger.Create(Array[Byte](bytes)), bigint)
CheckByteConversions(BigInteger(0x00), [0x00])
CheckByteConversions(BigInteger(-0x01), [0xff])
CheckByteConversions(BigInteger(-0x81), [0x7f, 0xff])
CheckByteConversions(BigInteger(-0x100), [0x00, 0xff])
CheckByteConversions(BigInteger(-0x1000), [0x00, 0xf0])
CheckByteConversions(BigInteger(-0x10000), [0x00, 0x00, 0xff])
CheckByteConversions(BigInteger(-0x100000), [0x00, 0x00, 0xf0])
CheckByteConversions(BigInteger(-0x10000000), [0x00, 0x00, 0x00, 0xf0])
CheckByteConversions(BigInteger(-0x100000000), [0x00, 0x00, 0x00, 0x00, 0xff])
CheckByteConversions(BigInteger(0x7f), [0x7f])
CheckByteConversions(BigInteger(0xff), [0xff, 0x00])
CheckByteConversions(BigInteger(0x0201), [0x01, 0x02])
CheckByteConversions(BigInteger(0xf2f1), [0xf1, 0xf2, 0x00])
CheckByteConversions(BigInteger(0x03020100), [0x00, 0x01, 0x02, 0x03])
CheckByteConversions(BigInteger(0x0403020100), [0x00, 0x01, 0x02, 0x03, 0x04])
CheckByteConversions(BigInteger(0x0706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07])
CheckByteConversions(BigInteger(0x080706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08])
def test_dword_conversions():
def CheckDwordConversions(bigint, dwords):
SequencesAreEqual(bigint.GetWords(), dwords)
if bigint == BigInteger.Zero:
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
0,
Array[UInt32](dwords),),
bigint)
else:
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
1,
Array[UInt32](dwords)),
bigint)
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
-1,
Array[UInt32](dwords)),
BigInteger.Negate(bigint))
CheckDwordConversions(BigInteger(0), [0x00000000])
CheckDwordConversions(BigInteger(1), [0x00000001])
CheckDwordConversions(BigInteger((1<<31)), [0x80000000])
CheckDwordConversions(BigInteger(((1<<31) + 9)), [0x80000009])
CheckDwordConversions(BigInteger((1<<32)), [0x00000000, 0x00000001])
def test_misc():
AssertError(ArgumentException, IronPythonTest.System_Scripting_Math.CreateBigInteger, 0, (1, 2, 3))
AssertError(ArgumentNullException, IronPythonTest.System_Scripting_Math.CreateBigInteger, 0, None)
AreEqual(BigInteger(1).CompareTo(None), 1)
if is_net40:
AreEqual(BigInteger(1).CompareTo(True), 0)
else:
AssertError(ArgumentException, BigInteger(1).CompareTo, True)
run_test(__name__)
| 41.015748
| 241
| 0.656748
| 1,016
| 10,418
| 6.690945
| 0.25689
| 0.090026
| 0.023683
| 0.011474
| 0.31877
| 0.230656
| 0.192262
| 0.150044
| 0.10915
| 0.10915
| 0
| 0.203911
| 0.209637
| 10,418
| 253
| 242
| 41.177866
| 0.621691
| 0.062584
| 0
| 0.333333
| 0
| 0
| 0.010129
| 0
| 0
| 0
| 0.053154
| 0
| 0.095238
| 1
| 0.068783
| false
| 0.005291
| 0.037037
| 0.010582
| 0.121693
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91c59190736d04c98947f42fd90af017204111ac
| 505
|
py
|
Python
|
ndscheduler/server/handlers/index.py
|
symphonyrm/ndscheduler
|
e9a56ef345b25916a2b53d1ea3349efb532d63ce
|
[
"BSD-2-Clause"
] | null | null | null |
ndscheduler/server/handlers/index.py
|
symphonyrm/ndscheduler
|
e9a56ef345b25916a2b53d1ea3349efb532d63ce
|
[
"BSD-2-Clause"
] | null | null | null |
ndscheduler/server/handlers/index.py
|
symphonyrm/ndscheduler
|
e9a56ef345b25916a2b53d1ea3349efb532d63ce
|
[
"BSD-2-Clause"
] | null | null | null |
"""Serves the single page app web ui."""
import json
import tornado.gen
from ndscheduler import settings
from ndscheduler import utils
from ndscheduler.server.handlers import base
class Handler(base.BaseHandler):
"""Index page request handler."""
@tornado.gen.coroutine
def get(self):
"""Serve up the single page app for scheduler dashboard."""
meta_info = utils.get_all_available_jobs()
self.render(settings.APP_INDEX_PAGE, jobs_meta_info=json.dumps(meta_info))
| 25.25
| 82
| 0.732673
| 69
| 505
| 5.231884
| 0.565217
| 0.124654
| 0.072022
| 0.088643
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174257
| 505
| 19
| 83
| 26.578947
| 0.865707
| 0.229703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.5
| 0
| 0.7
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
91c92b40c4f1e26399a0ff522ec30f406f0ff98d
| 934
|
py
|
Python
|
nlp_annotator_api/server/app.py
|
IBM/deepsearch-nlp-annotator-api-example
|
76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40
|
[
"Apache-2.0"
] | 3
|
2022-01-04T12:15:22.000Z
|
2022-03-25T21:19:20.000Z
|
nlp_annotator_api/server/app.py
|
IBM/deepsearch-nlp-annotator-api-example
|
76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40
|
[
"Apache-2.0"
] | null | null | null |
nlp_annotator_api/server/app.py
|
IBM/deepsearch-nlp-annotator-api-example
|
76c2c8fd83c1e6d51c51c7b581a8c3f273b23c40
|
[
"Apache-2.0"
] | 5
|
2021-09-27T08:26:09.000Z
|
2022-03-10T11:41:35.000Z
|
import logging
import os
import aiohttp.web
from connexion import AioHttpApp
from nlp_annotator_api.config.config import conf
from nlp_annotator_api.config.logging import setup_logging
from nlp_annotator_api.server.middleware.statsd_middleware import StatsdMiddleware
from nlp_annotator_api.server.signals.statsd_client import statsd_client_factory
setup_logging()
access_log = logging.getLogger("nlp_annotator_api.access")
_file_dir = os.path.dirname(__file__)
app = AioHttpApp(
__name__, specification_dir=os.path.join(_file_dir, "..", "resources", "schemas"),
server_args=dict(
client_max_size=8 * 1024**2
)
)
app.add_api("openapi.yaml", pass_context_arg_name="request")
aiohttp_app: aiohttp.web.Application = app.app
aiohttp_app.cleanup_ctx.append(statsd_client_factory(conf.statsd))
aiohttp_app.middlewares.append(StatsdMiddleware())
if __name__ == "__main__":
app.run(access_log=access_log)
| 26.685714
| 86
| 0.799786
| 128
| 934
| 5.421875
| 0.4375
| 0.086455
| 0.108069
| 0.10951
| 0.144092
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007168
| 0.103854
| 934
| 34
| 87
| 27.470588
| 0.821983
| 0
| 0
| 0
| 0
| 0
| 0.073876
| 0.025696
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.043478
| 0.347826
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
91c9ae32ffd6100ceb2a8fceee2c2c30ae4e7dc4
| 3,518
|
py
|
Python
|
dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 1
|
2019-06-22T21:53:16.000Z
|
2019-06-22T21:53:16.000Z
|
dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 3
|
2021-08-22T11:47:45.000Z
|
2022-03-29T22:06:49.000Z
|
dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 1
|
2020-07-17T23:50:56.000Z
|
2020-07-17T23:50:56.000Z
|
"""replace FileRequest with FileGeneration
Revision ID: 8692ab1298e1
Revises: 4bbc47f2b48d
Create Date: 2018-10-24 14:54:39.278159
"""
# revision identifiers, used by Alembic.
revision = '8692ab1298e1'
down_revision = '4bbc47f2b48d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('file_generation',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('file_generation_id', sa.Integer(), nullable=False),
sa.Column('request_date', sa.Date(), nullable=False),
sa.Column('start_date', sa.Date(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=False),
sa.Column('agency_code', sa.Text(), nullable=False),
sa.Column('agency_type', sa.Enum('awarding', 'funding', name='generation_agency_types'), server_default='awarding', nullable=False),
sa.Column('file_type', sa.Enum('D1', 'D2', name='generation_file_types'), server_default='D1', nullable=False),
sa.Column('file_path', sa.Text(), nullable=True),
sa.Column('is_cached_file', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('file_generation_id')
)
op.create_index(op.f('ix_file_generation_agency_code'), 'file_generation', ['agency_code'], unique=False)
op.create_index(op.f('ix_file_generation_agency_type'), 'file_generation', ['agency_type'], unique=False)
op.create_index(op.f('ix_file_generation_end_date'), 'file_generation', ['end_date'], unique=False)
op.create_index(op.f('ix_file_generation_file_type'), 'file_generation', ['file_type'], unique=False)
op.create_index(op.f('ix_file_generation_request_date'), 'file_generation', ['request_date'], unique=False)
op.create_index(op.f('ix_file_generation_start_date'), 'file_generation', ['start_date'], unique=False)
op.add_column('job', sa.Column('file_generation_id', sa.Integer(), nullable=True))
op.create_foreign_key('fk_file_request_file_generation_id', 'job', 'file_generation', ['file_generation_id'], ['file_generation_id'], ondelete='SET NULL')
op.drop_column('job', 'from_cached')
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('job', sa.Column('from_cached', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
op.drop_constraint('fk_file_request_file_generation_id', 'job', type_='foreignkey')
op.drop_column('job', 'file_generation_id')
op.drop_index(op.f('ix_file_generation_start_date'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_request_date'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_file_type'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_end_date'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_agency_type'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_agency_code'), table_name='file_generation')
op.drop_table('file_generation')
op.execute("""
DROP TYPE generation_agency_types
""")
op.execute("""
DROP TYPE generation_file_types
""")
# ### end Alembic commands ###
| 45.102564
| 158
| 0.726549
| 484
| 3,518
| 4.958678
| 0.200413
| 0.204167
| 0.04
| 0.05
| 0.529583
| 0.4725
| 0.44625
| 0.3575
| 0.283333
| 0.265
| 0
| 0.017025
| 0.115122
| 3,518
| 77
| 159
| 45.688312
| 0.753935
| 0.091245
| 0
| 0.078431
| 0
| 0
| 0.376701
| 0.160177
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0.039216
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91d4aad729e6a3ae80ef7ec7692d7daf662bb479
| 1,127
|
py
|
Python
|
setup.py
|
garnaat/details
|
07f2fc7f27b29a6ddcda918abf6ae0882450319e
|
[
"Apache-2.0"
] | 27
|
2015-03-01T10:54:32.000Z
|
2021-09-08T14:52:30.000Z
|
setup.py
|
garnaat/details
|
07f2fc7f27b29a6ddcda918abf6ae0882450319e
|
[
"Apache-2.0"
] | 3
|
2015-01-29T08:26:13.000Z
|
2017-02-14T09:35:06.000Z
|
setup.py
|
garnaat/details
|
07f2fc7f27b29a6ddcda918abf6ae0882450319e
|
[
"Apache-2.0"
] | 7
|
2015-03-26T13:53:34.000Z
|
2017-05-23T20:58:28.000Z
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
requires = [
]
setup(
name='details',
version=open(os.path.join('details', '_version')).read(),
description='Tools for processing AWS detailed billing reports',
long_description=open('README.md').read(),
author='Mitch Garnaat',
author_email='[email protected]',
url='https://github.com/scopely-devops/details',
packages=find_packages(exclude=['tests*']),
package_dir={'details': 'details'},
install_requires=requires,
license=open("LICENSE").read(),
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'
),
)
| 30.459459
| 68
| 0.632653
| 117
| 1,127
| 6.034188
| 0.581197
| 0.161473
| 0.212465
| 0.110482
| 0.076487
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011312
| 0.215617
| 1,127
| 36
| 69
| 31.305556
| 0.78733
| 0.017746
| 0
| 0
| 0
| 0
| 0.517179
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91d867e70ec797fb77cf3fedd501ea6a1aca218d
| 8,301
|
py
|
Python
|
wbia/plottool/interact_keypoints.py
|
mmulich/wildbook-ia
|
81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663
|
[
"Apache-2.0"
] | null | null | null |
wbia/plottool/interact_keypoints.py
|
mmulich/wildbook-ia
|
81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663
|
[
"Apache-2.0"
] | null | null | null |
wbia/plottool/interact_keypoints.py
|
mmulich/wildbook-ia
|
81b405e2bfaa3f6c30a546fb6dc6e6488e9b2663
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import utool as ut
import six
from . import draw_func2 as df2
from wbia.plottool import plot_helpers as ph
from wbia.plottool import interact_helpers as ih
from wbia.plottool.viz_featrow import draw_feat_row
from wbia.plottool.viz_keypoints import show_keypoints
from wbia.plottool import abstract_interaction
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
class KeypointInteraction(abstract_interaction.AbstractInteraction):
r"""
CommandLine:
python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show
python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show --fname=lena.png
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.interact_keypoints import * # NOQA
>>> import numpy as np
>>> import wbia.plottool as pt
>>> import utool as ut
>>> import pyhesaff
>>> import vtool as vt
>>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts()
>>> ut.quit_if_noshow()
>>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct')
>>> pt.interact_keypoints.KeypointInteraction(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, autostart=True)
>>> pt.show_if_requested()
"""
def __init__(self, chip, kpts, vecs, fnum=0, figtitle=None, **kwargs):
self.chip = chip
self.kpts = kpts
self.vecs = vecs
self.figtitle = figtitle
self.mode = 0
super(KeypointInteraction, self).__init__(**kwargs)
def plot(self, fnum=None, pnum=(1, 1, 1), **kwargs):
import wbia.plottool as pt
fnum = pt.ensure_fnum(fnum)
pt.figure(fnum=fnum, docla=True, doclf=True)
show_keypoints(self.chip, self.kpts, fnum=fnum, pnum=pnum, **kwargs)
if self.figtitle is not None:
pt.set_figtitle(self.figtitle)
def _select_ith_kpt(self, fx):
logger.info('[interact] viewing ith=%r keypoint' % fx)
# Get the fx-th keypiont
kp, sift = self.kpts[fx], self.vecs[fx]
# Draw the image with keypoint fx highlighted
self.plot(self.fnum, (2, 1, 1), sel_fx=fx)
# Draw the selected feature
nRows, nCols, px = (2, 3, 3)
draw_feat_row(self.chip, fx, kp, sift, self.fnum, nRows, nCols, px, None)
def on_click_outside(self, event):
self.mode = (self.mode + 1) % 3
ell = self.mode == 1
pts = self.mode == 2
logger.info('... default kpts view mode=%r' % self.mode)
self.plot(self.fnum, ell=ell, pts=pts)
self.draw()
def on_click_inside(self, event, ax):
import wbia.plottool as pt
viztype = ph.get_plotdat(ax, 'viztype', None)
logger.info('[ik] viztype=%r' % viztype)
if viztype is None:
pass
elif viztype == 'keypoints':
kpts = ph.get_plotdat(ax, 'kpts', [])
if len(kpts) == 0:
logger.info('...nokpts')
else:
logger.info('...nearest')
x, y = event.xdata, event.ydata
import vtool as vt
fx = vt.nearest_point(x, y, kpts)[0]
self._select_ith_kpt(fx)
elif viztype == 'warped':
hs_fx = ph.get_plotdat(ax, 'fx', None)
if hs_fx is not None:
kp = self.kpts[hs_fx] # FIXME
sift = self.vecs[hs_fx]
df2.draw_keypoint_gradient_orientations(
self.chip, kp, sift=sift, mode='vec', fnum=pt.next_fnum()
)
pt.draw()
elif viztype.startswith('colorbar'):
pass
else:
logger.info('...unhandled')
self.draw()
def ishow_keypoints(chip, kpts, desc, fnum=0, figtitle=None, nodraw=False, **kwargs):
"""
TODO: Depricate in favor of the class
CommandLine:
python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show
python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show --fname zebra.png
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.interact_keypoints import * # NOQA
>>> import numpy as np
>>> import wbia.plottool as pt
>>> import utool as ut
>>> import pyhesaff
>>> import vtool as vt
>>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts()
>>> ut.quit_if_noshow()
>>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct')
>>> pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4)
>>> pt.show_if_requested()
"""
if isinstance(chip, six.string_types):
import vtool as vt
chip = vt.imread(chip)
fig = ih.begin_interaction('keypoint', fnum)
annote_ptr = [1]
self = ut.DynStruct() # MOVE TO A CLASS INTERACTION
self.kpts = kpts
vecs = desc
self.vecs = vecs
def _select_ith_kpt(fx):
logger.info('[interact] viewing ith=%r keypoint' % fx)
# Get the fx-th keypiont
kp, sift = kpts[fx], vecs[fx]
# Draw the image with keypoint fx highlighted
_viz_keypoints(fnum, (2, 1, 1), sel_fx=fx, **kwargs) # MAYBE: remove kwargs
# Draw the selected feature
nRows, nCols, px = (2, 3, 3)
draw_feat_row(chip, fx, kp, sift, fnum, nRows, nCols, px, None)
def _viz_keypoints(fnum, pnum=(1, 1, 1), **kwargs):
df2.figure(fnum=fnum, docla=True, doclf=True)
show_keypoints(chip, kpts, fnum=fnum, pnum=pnum, **kwargs)
if figtitle is not None:
df2.set_figtitle(figtitle)
def _on_keypoints_click(event):
logger.info('[viz] clicked keypoint view')
if event is None or event.xdata is None or event.inaxes is None:
annote_ptr[0] = (annote_ptr[0] + 1) % 3
mode = annote_ptr[0]
ell = mode == 1
pts = mode == 2
logger.info('... default kpts view mode=%r' % mode)
_viz_keypoints(fnum, ell=ell, pts=pts, **kwargs) # MAYBE: remove kwargs
else:
ax = event.inaxes
viztype = ph.get_plotdat(ax, 'viztype', None)
logger.info('[ik] viztype=%r' % viztype)
if viztype == 'keypoints':
kpts = ph.get_plotdat(ax, 'kpts', [])
if len(kpts) == 0:
logger.info('...nokpts')
else:
logger.info('...nearest')
x, y = event.xdata, event.ydata
import vtool as vt
fx = vt.nearest_point(x, y, kpts)[0]
_select_ith_kpt(fx)
elif viztype == 'warped':
hs_fx = ph.get_plotdat(ax, 'fx', None)
# kpts = ph.get_plotdat(ax, 'kpts', [])
if hs_fx is not None:
# Ugly. Interactions should be changed to classes.
kp = self.kpts[hs_fx] # FIXME
sift = self.vecs[hs_fx]
df2.draw_keypoint_gradient_orientations(
chip, kp, sift=sift, mode='vec', fnum=df2.next_fnum()
)
elif viztype.startswith('colorbar'):
pass
# Hack to get a specific scoring feature
# sortx = self.fs.argsort()
# idx = np.clip(int(np.round(y * len(sortx))), 0, len(sortx) - 1)
# mx = sortx[idx]
# (fx1, fx2) = self.fm[mx]
# (fx1, fx2) = self.fm[mx]
# logger.info('... selected score at rank idx=%r' % (idx,))
# logger.info('... selected score with fs=%r' % (self.fs[mx],))
# logger.info('... resolved to mx=%r' % mx)
# logger.info('... fx1, fx2 = %r, %r' % (fx1, fx2,))
# self.select_ith_match(mx)
else:
logger.info('...unhandled')
ph.draw()
# Draw without keypoints the first time
_viz_keypoints(fnum, **kwargs) # MAYBE: remove kwargs
ih.connect_callback(fig, 'button_press_event', _on_keypoints_click)
if not nodraw:
ph.draw()
| 39.15566
| 113
| 0.563787
| 1,035
| 8,301
| 4.393237
| 0.201932
| 0.037387
| 0.024632
| 0.021553
| 0.554651
| 0.506928
| 0.490653
| 0.451067
| 0.444469
| 0.362657
| 0
| 0.010508
| 0.312131
| 8,301
| 211
| 114
| 39.341232
| 0.785814
| 0.292013
| 0
| 0.435115
| 0
| 0
| 0.061973
| 0
| 0
| 0
| 0
| 0.014218
| 0
| 1
| 0.068702
| false
| 0.022901
| 0.10687
| 0
| 0.183206
| 0.007634
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91da549f96f9ccca48e20a796a48546be83febae
| 206
|
py
|
Python
|
exercises/ja/exc_03_16_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085
|
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/ja/exc_03_16_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79
|
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/ja/exc_03_16_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361
|
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
import spacy
nlp = spacy.load("ja_core_news_sm")
text = (
"チックフィレイはジョージア州カレッジパークに本社を置く、"
"チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)
# トークナイズのみ行う
doc = nlp(text)
print([token.text for token in doc])
| 17.166667
| 42
| 0.73301
| 23
| 206
| 6.434783
| 0.73913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150485
| 206
| 11
| 43
| 18.727273
| 0.845714
| 0.048544
| 0
| 0
| 0
| 0
| 0.407216
| 0.329897
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.125
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
91e4401665d568cd4d6102a4a69c6d2f7668744f
| 602
|
py
|
Python
|
backend/api/v1/dialogs/urls.py
|
donicrazy/ChatApp
|
ab129a9c0706bbb972cbce43283ba6e06d144635
|
[
"MIT"
] | null | null | null |
backend/api/v1/dialogs/urls.py
|
donicrazy/ChatApp
|
ab129a9c0706bbb972cbce43283ba6e06d144635
|
[
"MIT"
] | 7
|
2021-03-19T04:47:13.000Z
|
2022-01-13T02:02:46.000Z
|
backend/api/v1/dialogs/urls.py
|
donicrazy/ChatApp
|
ab129a9c0706bbb972cbce43283ba6e06d144635
|
[
"MIT"
] | null | null | null |
from django.urls import path
from backend.api.v1.dialogs.views import (
DialogListCreateView,
DialogRetrieveUpdateDestroyAPIView,
DialogMembershipListCreateView,
DialogMessageListCreateView,
DialogMessageRetrieveUpdateDestroyAPIView,
)
urlpatterns = [
path('', DialogListCreateView.as_view()),
path('<int:pk>', DialogRetrieveUpdateDestroyAPIView.as_view()),
path('membership/', DialogMembershipListCreateView.as_view()),
path('messages/', DialogMessageListCreateView.as_view()),
path('messages/<int:pk>', DialogMessageRetrieveUpdateDestroyAPIView.as_view()),
]
| 35.411765
| 83
| 0.770764
| 45
| 602
| 10.2
| 0.488889
| 0.065359
| 0.087146
| 0.078431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001876
| 0.114618
| 602
| 16
| 84
| 37.625
| 0.859287
| 0
| 0
| 0
| 0
| 0
| 0.074751
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
37d2de39d6a42eafed34788e36c34749e153b301
| 500
|
py
|
Python
|
info.py
|
altfool/mri_face_detection
|
3117f7f00c98efe2260936146ce6b5454b059672
|
[
"MIT"
] | 1
|
2021-11-13T02:42:49.000Z
|
2021-11-13T02:42:49.000Z
|
info.py
|
altfool/mri_face_detection
|
3117f7f00c98efe2260936146ce6b5454b059672
|
[
"MIT"
] | null | null | null |
info.py
|
altfool/mri_face_detection
|
3117f7f00c98efe2260936146ce6b5454b059672
|
[
"MIT"
] | null | null | null |
import numpy as np
img_dtype = np.float32
imgX, imgY, imgZ = (256, 256, 150)
imgs_path_withfaces = '../dataset/withfaces'
imgs_path_nofaces = '../dataset/nofaces'
imgX_dwt1, imgY_dwt1, imgZ_dwt1 = (128, 128, 75)
imgs_path_withfaces_dwt = './dataset/withfaces'
imgs_path_nofaces_dwt = './dataset/nofaces'
dwt_flag = (True, False)[0]
if dwt_flag:
imgX, imgY, imgZ = imgX_dwt1, imgY_dwt1, imgZ_dwt1
imgs_path_withfaces = imgs_path_withfaces_dwt
imgs_path_nofaces = imgs_path_nofaces_dwt
| 27.777778
| 54
| 0.752
| 76
| 500
| 4.565789
| 0.342105
| 0.184438
| 0.195965
| 0.138329
| 0.317003
| 0.138329
| 0
| 0
| 0
| 0
| 0
| 0.060185
| 0.136
| 500
| 17
| 55
| 29.411765
| 0.743056
| 0
| 0
| 0
| 0
| 0
| 0.148
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
37d53dc9e4eafc3370db20f7342e6ffdb10aeb9f
| 24,609
|
py
|
Python
|
src/pretalx/orga/urls.py
|
martinheidegger/pretalx
|
d812e665c1c5ce29df3eafc1985af08e4d986fef
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/orga/urls.py
|
martinheidegger/pretalx
|
d812e665c1c5ce29df3eafc1985af08e4d986fef
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/orga/urls.py
|
martinheidegger/pretalx
|
d812e665c1c5ce29df3eafc1985af08e4d986fef
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from pretalx.event.models.event import SLUG_CHARS
from pretalx.orga.views import cards
from .views import (
admin,
auth,
cfp,
dashboard,
event,
mails,
organiser,
person,
plugins,
review,
schedule,
speaker,
submission,
)
app_name = "orga"
urlpatterns = [
url("^login/$", auth.LoginView.as_view(), name="login"),
url("^logout/$", auth.logout_view, name="logout"),
url("^reset/$", auth.ResetView.as_view(), name="auth.reset"),
url(r"^reset/(?P<token>\w+)$", auth.RecoverView.as_view(), name="auth.recover"),
url("^$", RedirectView.as_view(url="event", permanent=False)),
url("^admin/$", admin.AdminDashboard.as_view(), name="admin.dashboard"),
url("^admin/update/$", admin.UpdateCheckView.as_view(), name="admin.update"),
url("^me$", event.UserSettings.as_view(), name="user.view"),
url("^me/subuser$", person.SubuserView.as_view(), name="user.subuser"),
url(
r"^invitation/(?P<code>\w+)$",
event.InvitationView.as_view(),
name="invitation.view",
),
url(
"^organiser/$",
dashboard.DashboardOrganiserListView.as_view(),
name="organiser.list",
),
url(
"^organiser/new$", organiser.OrganiserDetail.as_view(), name="organiser.create"
),
url(
f"^organiser/(?P<organiser>[{SLUG_CHARS}]+)/",
include(
[
url("^$", organiser.OrganiserDetail.as_view(), name="organiser.view"),
url(
"^delete$",
organiser.OrganiserDelete.as_view(),
name="organiser.delete",
),
url("^teams/$", organiser.TeamDetail.as_view(), name="organiser.teams"),
url(
"^teams/new$",
organiser.TeamDetail.as_view(),
name="organiser.teams.create",
),
url(
"^teams/(?P<pk>[0-9]+)/$",
organiser.TeamDetail.as_view(),
name="organiser.teams.view",
),
url(
"^teams/(?P<pk>[0-9]+)/delete$",
organiser.TeamDelete.as_view(),
name="organiser.teams.delete",
),
url(
"^teams/(?P<pk>[0-9]+)/tracks$",
organiser.TeamTracks.as_view(),
name="organiser.teams.tracks",
),
url(
"^teams/(?P<pk>[0-9]+)/delete/(?P<user_pk>[0-9]+)$",
organiser.TeamDelete.as_view(),
name="organiser.teams.delete_member",
),
url(
"^teams/(?P<pk>[0-9]+)/reset/(?P<user_pk>[0-9]+)$",
organiser.TeamResetPassword.as_view(),
name="organiser.team.password_reset",
),
url(
"^teams/(?P<pk>[0-9]+)/uninvite$",
organiser.TeamUninvite.as_view(),
name="organiser.teams.uninvite",
),
url(
"^teams/(?P<pk>[0-9]+)/resend$",
organiser.TeamResend.as_view(),
name="organiser.teams.resend",
),
]
),
),
url("^event/new/$", event.EventWizard.as_view(), name="event.create"),
url("^event/typeahead/$", event.event_list, name="event.typeahead"),
url("^event/$", dashboard.DashboardEventListView.as_view(), name="event.list"),
url(
f"^event/(?P<event>[{SLUG_CHARS}]+)/",
include(
[
url(
"^$", dashboard.EventDashboardView.as_view(), name="event.dashboard"
),
url("^login/$", auth.LoginView.as_view(), name="event.login"),
url("^reset/$", auth.ResetView.as_view(), name="event.auth.reset"),
url(
r"^reset/(?P<token>\w+)$",
auth.RecoverView.as_view(),
name="event.auth.recover",
),
url("^delete$", event.EventDelete.as_view(), name="event.delete"),
url("^live$", event.EventLive.as_view(), name="event.live"),
url("^api/users$", person.UserList.as_view(), name="event.user_list"),
url(
"^cfp/$",
RedirectView.as_view(pattern_name="orga:cfp.text.view"),
name="cfp",
),
url("^cfp/flow/$", cfp.CfPFlowEditor.as_view(), name="cfp.flow"),
url(
"^cfp/questions/$",
cfp.CfPQuestionList.as_view(),
name="cfp.questions.view",
),
url(
"^cfp/questions/new$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.questions.create",
),
url(
"^cfp/questions/remind$",
cfp.CfPQuestionRemind.as_view(),
name="cfp.questions.remind",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.question.view",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/up$",
cfp.question_move_up,
name="cfp.questions.up",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/down$",
cfp.question_move_down,
name="cfp.questions.down",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/delete$",
cfp.CfPQuestionDelete.as_view(),
name="cfp.question.delete",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/edit$",
cfp.CfPQuestionDetail.as_view(),
name="cfp.question.edit",
),
url(
"^cfp/questions/(?P<pk>[0-9]+)/toggle$",
cfp.CfPQuestionToggle.as_view(),
name="cfp.question.toggle",
),
url("^cfp/text$", cfp.CfPTextDetail.as_view(), name="cfp.text.view"),
url(
"^cfp/types/$",
cfp.SubmissionTypeList.as_view(),
name="cfp.types.view",
),
url(
"^cfp/types/new$",
cfp.SubmissionTypeDetail.as_view(),
name="cfp.types.create",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/$",
cfp.SubmissionTypeDetail.as_view(),
name="cfp.type.view",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/delete$",
cfp.SubmissionTypeDelete.as_view(),
name="cfp.type.delete",
),
url(
"^cfp/types/(?P<pk>[0-9]+)/default$",
cfp.SubmissionTypeDefault.as_view(),
name="cfp.type.default",
),
url("^cfp/tracks/$", cfp.TrackList.as_view(), name="cfp.tracks.view"),
url(
"^cfp/tracks/new$",
cfp.TrackDetail.as_view(),
name="cfp.track.create",
),
url(
"^cfp/tracks/(?P<pk>[0-9]+)/$",
cfp.TrackDetail.as_view(),
name="cfp.track.view",
),
url(
"^cfp/tracks/(?P<pk>[0-9]+)/delete$",
cfp.TrackDelete.as_view(),
name="cfp.track.delete",
),
url(
"^cfp/access-codes/$",
cfp.AccessCodeList.as_view(),
name="cfp.access_code.view",
),
url(
"^cfp/access-codes/new$",
cfp.AccessCodeDetail.as_view(),
name="cfp.access_code.create",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/$",
cfp.AccessCodeDetail.as_view(),
name="cfp.access_code.view",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/send$",
cfp.AccessCodeSend.as_view(),
name="cfp.access_code.send",
),
url(
"^cfp/access-codes/(?P<code>[A-z0-9]+)/delete$",
cfp.AccessCodeDelete.as_view(),
name="cfp.access_code.delete",
),
url(
"^mails/",
include(
[
url(
"^(?P<pk>[0-9]+)/$",
mails.MailDetail.as_view(),
name="mails.outbox.mail.view",
),
url(
"^(?P<pk>[0-9]+)/copy$",
mails.MailCopy.as_view(),
name="mails.outbox.mail.copy",
),
url(
"^(?P<pk>[0-9]+)/delete$",
mails.OutboxPurge.as_view(),
name="mails.outbox.mail.delete",
),
url(
"^(?P<pk>[0-9]+)/send$",
mails.OutboxSend.as_view(),
name="mails.outbox.mail.send",
),
url(
"^templates/$",
mails.TemplateList.as_view(),
name="mails.templates.list",
),
url(
"^templates/new$",
mails.TemplateDetail.as_view(),
name="mails.templates.create",
),
url(
"^templates/(?P<pk>[0-9]+)/$",
mails.TemplateDetail.as_view(),
name="mails.templates.view",
),
url(
"^templates/(?P<pk>[0-9]+)/delete$",
mails.TemplateDelete.as_view(),
name="mails.templates.delete",
),
url(
"^compose$",
mails.ComposeMail.as_view(),
name="mails.compose",
),
url("^sent$", mails.SentMail.as_view(), name="mails.sent"),
url(
"^outbox/$",
mails.OutboxList.as_view(),
name="mails.outbox.list",
),
url(
"^outbox/send$",
mails.OutboxSend.as_view(),
name="mails.outbox.send",
),
url(
"^outbox/purge$",
mails.OutboxPurge.as_view(),
name="mails.outbox.purge",
),
]
),
),
url(
"^submissions/$",
submission.SubmissionList.as_view(),
name="submissions.list",
),
url(
"^submissions/new$",
submission.SubmissionContent.as_view(),
name="submissions.create",
),
url(
"^submissions/cards/$",
cards.SubmissionCards.as_view(),
name="submissions.cards",
),
url(
"^submissions/feed/$",
submission.SubmissionFeed(),
name="submissions.feed",
),
url(
"^submissions/statistics/$",
submission.SubmissionStats.as_view(),
name="submissions.statistics",
),
url(
"^submissions/feedback/$",
submission.AllFeedbacksList.as_view(),
name="submissions.feedback",
),
url(
r"^submissions/(?P<code>[\w-]+)/",
include(
[
url(
"^$",
submission.SubmissionContent.as_view(),
name="submissions.content.view",
),
url(
"^submit$",
submission.SubmissionStateChange.as_view(),
name="submissions.submit",
),
url(
"^accept$",
submission.SubmissionStateChange.as_view(),
name="submissions.accept",
),
url(
"^reject$",
submission.SubmissionStateChange.as_view(),
name="submissions.reject",
),
url(
"^confirm",
submission.SubmissionStateChange.as_view(),
name="submissions.confirm",
),
url(
"^withdraw$",
submission.SubmissionStateChange.as_view(),
name="submissions.withdraw",
),
url(
"^delete",
submission.SubmissionStateChange.as_view(),
name="submissions.delete",
),
url(
"^cancel",
submission.SubmissionStateChange.as_view(),
name="submissions.cancel",
),
url(
"^speakers/$",
submission.SubmissionSpeakers.as_view(),
name="submissions.speakers.view",
),
url(
"^speakers/add$",
submission.SubmissionSpeakersAdd.as_view(),
name="submissions.speakers.add",
),
url(
"^speakers/delete$",
submission.SubmissionSpeakersDelete.as_view(),
name="submissions.speakers.delete",
),
url(
"^reviews/$",
review.ReviewSubmission.as_view(),
name="submissions.reviews",
),
url(
"^reviews/delete$",
review.ReviewSubmissionDelete.as_view(),
name="submissions.reviews.submission.delete",
),
url(
"^feedback/$",
submission.FeedbackList.as_view(),
name="submissions.feedback.list",
),
url(
"^toggle_featured$",
submission.ToggleFeatured.as_view(),
name="submissions.toggle_featured",
),
url(
"^anonymise/$",
submission.Anonymise.as_view(),
name="submissions.anonymise",
),
]
),
),
url("^speakers/$", speaker.SpeakerList.as_view(), name="speakers.list"),
url(
"^speakers/(?P<pk>[0-9]+)/$",
speaker.SpeakerDetail.as_view(),
name="speakers.view",
),
url(
"^speakers/(?P<pk>[0-9]+)/reset$",
speaker.SpeakerPasswordReset.as_view(),
name="speakers.reset",
),
url(
"^speakers/(?P<pk>[0-9]+)/toggle-arrived$",
speaker.SpeakerToggleArrived.as_view(),
name="speakers.arrived",
),
url(
"^info/$",
speaker.InformationList.as_view(),
name="speakers.information.list",
),
url(
"^info/new$",
speaker.InformationDetail.as_view(),
name="speakers.information.create",
),
url(
"^info/(?P<pk>[0-9]+)/$",
speaker.InformationDetail.as_view(),
name="speakers.information.view",
),
url(
"^info/(?P<pk>[0-9]+)/delete$",
speaker.InformationDelete.as_view(),
name="speakers.information.delete",
),
url(
"^reviews/$",
review.ReviewDashboard.as_view(),
name="reviews.dashboard",
),
url(
"^reviews/regenerate/$",
review.RegenerateDecisionMails.as_view(),
name="reviews.regenerate",
),
url(
"^settings/$",
event.EventDetail.as_view(),
name="settings.event.view",
),
url(
"^settings/mail$",
event.EventMailSettings.as_view(),
name="settings.mail.view",
),
url(
"^settings/plugins$",
plugins.EventPluginsView.as_view(),
name="settings.plugins.select",
),
url(
"^settings/widget$",
event.WidgetSettings.as_view(),
name="settings.widget",
),
url(
"^settings/review/$",
event.EventReviewSettings.as_view(),
name="settings.review",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/up$",
event.phase_move_up,
name="settings.review.phase.up",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/down$",
event.phase_move_down,
name="settings.review.phase.down",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/delete$",
event.PhaseDelete.as_view(),
name="settings.review.phasedelete",
),
url(
"^settings/review/phase/(?P<pk>[0-9]+)/activate$",
event.PhaseActivate.as_view(),
name="settings.review.phasedelete",
),
url(
"^schedule/$", schedule.ScheduleView.as_view(), name="schedule.main"
),
url(
"^schedule/export/$",
schedule.ScheduleExportView.as_view(),
name="schedule.export",
),
url(
"^schedule/export/trigger$",
schedule.ScheduleExportTriggerView.as_view(),
name="schedule.export.trigger",
),
url(
"^schedule/export/download$",
schedule.ScheduleExportDownloadView.as_view(),
name="schedule.export.download",
),
url(
"^schedule/release$",
schedule.ScheduleReleaseView.as_view(),
name="schedule.release",
),
url(
r"^schedule/quick/(?P<code>\w+)/$",
schedule.QuickScheduleView.as_view(),
name="schedule.quick",
),
url(
"^schedule/reset$",
schedule.ScheduleResetView.as_view(),
name="schedule.reset",
),
url(
"^schedule/toggle$",
schedule.ScheduleToggleView.as_view(),
name="schedule.toggle",
),
url(
"^schedule/resend_mails$",
schedule.ScheduleResendMailsView.as_view(),
name="schedule.resend_mails",
),
url(
"^schedule/rooms/$",
schedule.RoomList.as_view(),
name="schedule.rooms.list",
),
url(
"^schedule/rooms/new$",
schedule.RoomDetail.as_view(),
name="schedule.rooms.create",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/$",
schedule.RoomDetail.as_view(),
name="schedule.rooms.view",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/delete$",
schedule.RoomDelete.as_view(),
name="schedule.rooms.delete",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/up$",
schedule.room_move_up,
name="schedule.rooms.up",
),
url(
"^schedule/rooms/(?P<pk>[0-9]+)/down$",
schedule.room_move_down,
name="schedule.rooms.down",
),
url(
"^schedule/api/talks/$",
schedule.TalkList.as_view(),
name="schedule.api.talks",
),
url(
"^schedule/api/talks/(?P<pk>[0-9]+)/$",
schedule.TalkUpdate.as_view(),
name="schedule.api.update",
),
url(
"^schedule/api/availabilities/(?P<talkid>[0-9]+)/(?P<roomid>[0-9]+)/$",
schedule.RoomTalkAvailabilities.as_view(),
name="schedule.api.availabilities",
),
]
),
),
]
| 40.54201
| 91
| 0.358771
| 1,657
| 24,609
| 5.234762
| 0.138805
| 0.08439
| 0.138344
| 0.021905
| 0.420221
| 0.316117
| 0.200599
| 0.068711
| 0.028706
| 0.021674
| 0
| 0.007509
| 0.512983
| 24,609
| 606
| 92
| 40.608911
| 0.716229
| 0
| 0
| 0.444444
| 0
| 0.004975
| 0.208745
| 0.113129
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.004975
| 0.008292
| 0
| 0.008292
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
37db93135f06b7cc7a06b9ea9f0839b0af335d54
| 6,889
|
py
|
Python
|
src/ITN/srmg/core/RiemannianRight.py
|
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection
|
2e35afaa891badf5a235b5d995102e4dc8a4cf0d
|
[
"MIT"
] | 1
|
2022-03-24T06:54:36.000Z
|
2022-03-24T06:54:36.000Z
|
src/ITN/srmg/core/RiemannianRight.py
|
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection
|
2e35afaa891badf5a235b5d995102e4dc8a4cf0d
|
[
"MIT"
] | null | null | null |
src/ITN/srmg/core/RiemannianRight.py
|
Yulv-git/Awesome-Ultrasound-Standard-Plane-Detection
|
2e35afaa891badf5a235b5d995102e4dc8a4cf0d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
'''
Author: Shuangchi He / Yulv
Email: [email protected]
Date: 2022-03-19 10:33:38
Motto: Entities should not be multiplied unnecessarily.
LastEditors: Shuangchi He
LastEditTime: 2022-03-23 00:52:55
FilePath: /Awesome-Ultrasound-Standard-Plane-Detection/src/ITN/srmg/core/RiemannianRight.py
Description: Modify here please
Init from https://github.com/yuanwei1989/plane-detection Author: Yuanwei Li (3 Oct 2018)
# Copyright (c) 2006-2017, Nina Milone, Bishesh Kanal, Benjamin Hou
# Copyright (c) 2006-2017, Imperial College of Science, Technology and Medicine
# Produced at Biomedical Image Analysis Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
Statistics on Riemannian Manifolds and Groups
---------------------------------------------
This is a set of codes to compare the computing of the different types of means on Lie groups.
These codes can be used to reproduce the experiments illustrated in the video developed for the
MICCAI Educational challenge 2014, available at: url of the video.
:Authors:
`Nina Miolane <website>`
`Bishesh Khanal <website>`
:Organization:
Asclepios Team, INRIA Sophia Antipolis.
:Version:
2017.07.05
Requirements
------------
* `Numpy 1.11 <http://www.numpy.org>`_
Notes
-----
----------
(1) Defining a mean on Lie group.
Nina Miolane. Medical Imaging. 2013. <hal-00938320>
'''
import numpy
import math
from srmg.common.group import *
from srmg.common.util import *
EPS = 1e-5
def riemExpR(a,f0,v):
"""
start: TODO
What the function does
clearer function name ?
Inputs description:
Outputs description:
end: TODO
Riemannian exponential and logarithm from any point f0 (for left- and right-invariant metric)
"""
f = grpCompose((riemExpIdR(a, numpy.linalg.lstsq(jR(f0),v)[0])), f0)
return f
def riemExpIdR(a,v):
"""
start: TODO
What the function does
clearer function name ?
Inputs description:
Outputs description:
end: TODO
Riemannian exponential and logarithm from Id (for left- and right-invariant metric)
"""
v=grpReg(-v);
f = numpy.zeros(6)
f[0:3] = v[0:3]
f[3:6] = a * v[3:6]
f = grpInv(f)
return f
def sigma2R(a,m,tabf,tabw):
"""
start: TODO
What the function does
clearer function name ?
Inputs description:
Outputs description:
end: TODO
"""
siz = tabf.shape[0]
if siz < 2:
print('Error: Calculating variance requires at least 2 points')
return 0
s = 0
for i in range(0,siz):
s = s + tabw[i] * normA2R(a,m,riemLogR(a,m,tabf[i,:]));
return s
def riemLogR(a,f0,f):
"""
DESCRIPTION
Attributes:
a: ?????
f0: ????
f: ????
Return:
v: ?????
"""
v=numpy.dot(jR(f0),riemLogIdR(a,grpCompose(f,grpInv(f0))))
return v
def riemLogIdR(a,f):
"""
DESCRIPTION
Attributes:
a: ?????
f: ????
Return:
v: ?????
"""
v = numpy.zeros(6)
v[0:3] = f[0:3]
v[3:6] = numpy.dot(rotMat(-f[0:3]),f[3:6]);
return v
def qR(a,f):
"""
Left- and right- invariant inner product in the principal chart (propagation of Frobenius inner product)
Attributes:
a: ?????
f: ????
Return:
g: ?????
"""
f = grpReg(f)
g0 = numpy.zeros([6,6])
g0[0:3,0:3] = numpy.eye(3)
g0[3:6,3:6] = a * numpy.eye(3)
g = numpy.dot(numpy.dot(numpy.linalg.inv(jR(f).T) , g0) , numpy.linalg.inv(jR(f)))
return g
def jR(f):
"""
Differentials of the left and right translations for SO(3) in the principal chart
Attributes:
r: ?????
Return:
Jl: ?????
"""
#f = makeColVector(f,6); # unnecessary if 1D
f = grpReg(f);
Jr = numpy.zeros([6,6])
Jr[0:3,0:3] = jRotR(f[0:3]);
Jr[3:6,0:3] = -skew(f[3:6]);
Jr[3:6,3:6] = numpy.eye(3);
return Jr
def normA2R(a,f,v):
"""
This function calculates the normalised left
Attributes:
a: ?????
f: ?????
v: ?????
Return:
n: normalised vector
"""
v=grpReg(v);
n=numpy.dot(numpy.dot(v.T,qR(a,f)),v);
return n
def frechetR(a,tabf,tabw):
"""
This function computes the frechet-L mean
Attributes:
img: The fixed image that will be transformed (simpleitk type)
a: ?????
tabf: SE3 data points (Nx6 vector)
tabw: data point weights (Nx1 vector)
Return:
m: The mean
"""
siz = tabf.shape[0]
if siz < 2:
print('Error: Calculating mean requires at least 2 points')
m = tabf[0,:]
# Iteration 0
mbis=m;
print('mbisR=' + str(mbis))
aux=numpy.zeros(6);
for i in range (0,siz):
aux=aux+tabw[i]*riemLogR(a,mbis,tabf[i,:]);
m=riemExpR(a,mbis,aux);
# Iteration 1 until converges
while (normA2R(a,mbis,riemLogR(a,mbis,m))>EPS*sigma2R(a,mbis,tabf,tabw)):
mbis=m;
print('mbisR=' + str(mbis))
aux=numpy.zeros(6);
for i in range (0,siz):
aux=aux+tabw[i]*riemLogR(a,mbis,tabf[i,:]);
m=riemExpR(a,mbis,aux);
return m
| 27.556
| 108
| 0.609958
| 953
| 6,889
| 4.408185
| 0.376705
| 0.005237
| 0.015711
| 0.011426
| 0.240895
| 0.197096
| 0.179243
| 0.179243
| 0.179243
| 0.179243
| 0
| 0.034612
| 0.266076
| 6,889
| 249
| 109
| 27.666667
| 0.796282
| 0.650167
| 0
| 0.304348
| 0
| 0
| 0.05653
| 0
| 0
| 0
| 0
| 0.024096
| 0
| 1
| 0.130435
| false
| 0
| 0.057971
| 0
| 0.333333
| 0.057971
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
37de891f427c0291be7aba179849ea2f6a86e5c6
| 281
|
py
|
Python
|
Python/Programming Basics/Simple Calculations/17. Daily Earnings.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Basics/Simple Calculations/17. Daily Earnings.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Basics/Simple Calculations/17. Daily Earnings.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
workdays = float(input())
daily_tips = float(input())
exchange_rate = float(input())
salary = workdays * daily_tips
annual_income = salary * 12 + salary * 2.5
net_income = annual_income - annual_income * 25 / 100
result = net_income / 365 * exchange_rate
print('%.2f' % result)
| 23.416667
| 53
| 0.711744
| 39
| 281
| 4.897436
| 0.512821
| 0.157068
| 0.188482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055085
| 0.160142
| 281
| 11
| 54
| 25.545455
| 0.754237
| 0
| 0
| 0
| 0
| 0
| 0.014235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
37e4f2c4b90817314cd77bae4c4800a1c5a1cfd8
| 11,933
|
py
|
Python
|
alerter/src/monitorables/nodes/chainlink_node.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 41
|
2019-08-23T12:40:42.000Z
|
2022-03-28T11:06:02.000Z
|
alerter/src/monitorables/nodes/chainlink_node.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 147
|
2019-08-30T22:09:48.000Z
|
2022-03-30T08:46:26.000Z
|
alerter/src/monitorables/nodes/chainlink_node.py
|
SimplyVC/panic
|
2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d
|
[
"Apache-2.0"
] | 3
|
2019-09-03T21:12:28.000Z
|
2021-08-18T14:27:56.000Z
|
from datetime import datetime
from typing import Optional, Dict, List, Union
from schema import Schema, Or
from src.monitorables.nodes.node import Node
from src.utils.exceptions import InvalidDictSchemaException
class ChainlinkNode(Node):
def __init__(self, node_name: str, node_id: str, parent_id: str) -> None:
super().__init__(node_name, node_id, parent_id)
# Metrics
self._went_down_at_prometheus = None
self._current_height = None
self._total_block_headers_received = None
self._max_pending_tx_delay = None
self._process_start_time_seconds = None
self._total_gas_bumps = None
self._total_gas_bumps_exceeds_limit = None
self._no_of_unconfirmed_txs = None
self._total_errored_job_runs = None
self._current_gas_price_info = {
'percentile': None,
'price': None,
}
self._eth_balance_info = {}
# This variable stores the url of the source used to get prometheus node
# data. Note that this had to be done because multiple prometheus
# sources can be associated with the same node, where at the same time
# only one source is available, and sources switch from time to time.
self._last_prometheus_source_used = None
# This stores the timestamp of the last successful monitoring round.
self._last_monitored_prometheus = None
@property
def is_down_prometheus(self) -> bool:
return self._went_down_at_prometheus is not None
@property
def went_down_at_prometheus(self) -> Optional[float]:
return self._went_down_at_prometheus
@property
def current_height(self) -> Optional[int]:
return self._current_height
@property
def total_block_headers_received(self) -> Optional[int]:
return self._total_block_headers_received
@property
def max_pending_tx_delay(self) -> Optional[int]:
return self._max_pending_tx_delay
@property
def process_start_time_seconds(self) -> Optional[float]:
return self._process_start_time_seconds
@property
def total_gas_bumps(self) -> Optional[int]:
return self._total_gas_bumps
@property
def total_gas_bumps_exceeds_limit(self) -> Optional[int]:
return self._total_gas_bumps_exceeds_limit
@property
def no_of_unconfirmed_txs(self) -> Optional[int]:
return self._no_of_unconfirmed_txs
@property
def total_errored_job_runs(self) -> Optional[int]:
return self._total_errored_job_runs
@property
def current_gas_price_info(self) -> Dict[str, Optional[float]]:
return self._current_gas_price_info
@property
def eth_balance_info(self) -> Dict[str, Union[str, float]]:
return self._eth_balance_info
@property
def last_prometheus_source_used(self) -> Optional[str]:
return self._last_prometheus_source_used
@property
def last_monitored_prometheus(self) -> Optional[float]:
return self._last_monitored_prometheus
@staticmethod
def get_int_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing integer prometheus
: metrics.
"""
return [
'current_height',
'total_block_headers_received',
'max_pending_tx_delay', 'total_gas_bumps',
'total_gas_bumps_exceeds_limit', 'no_of_unconfirmed_txs',
'total_errored_job_runs'
]
@staticmethod
def get_float_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing float prometheus
: metrics.
"""
return [
'went_down_at_prometheus', 'process_start_time_seconds',
'last_monitored_prometheus'
]
@staticmethod
def get_dict_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing dict prometheus
: metrics.
"""
return ['current_gas_price_info', 'eth_balance_info']
@staticmethod
def get_str_prometheus_metric_attributes() -> List[str]:
"""
:return: A list of all variable names representing string prometheus
: metrics.
"""
return ['last_prometheus_source_used']
def get_all_prometheus_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing prometheus metrics
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [
*str_prometheus_metric_attributes,
*int_prometheus_metric_attributes,
*float_prometheus_metric_attributes,
*dict_prometheus_metric_attributes
]
def get_int_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing int metrics.
"""
int_prometheus_metric_attributes = \
self.get_int_prometheus_metric_attributes()
return [*int_prometheus_metric_attributes]
def get_float_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing float metrics.
"""
float_prometheus_metric_attributes = \
self.get_float_prometheus_metric_attributes()
return [*float_prometheus_metric_attributes]
def get_dict_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing dict metrics.
"""
dict_prometheus_metric_attributes = \
self.get_dict_prometheus_metric_attributes()
return [*dict_prometheus_metric_attributes]
def get_str_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing str metrics.
"""
str_prometheus_metric_attributes = \
self.get_str_prometheus_metric_attributes()
return [*str_prometheus_metric_attributes]
def get_all_metric_attributes(self) -> List[str]:
"""
:return: A list of all variable names representing metrics
"""
prometheus_metric_attributes = \
self.get_all_prometheus_metric_attributes()
return [*prometheus_metric_attributes]
def set_went_down_at_prometheus(
self, went_down_at_prometheus: Optional[float]) -> None:
self._went_down_at_prometheus = went_down_at_prometheus
def set_prometheus_as_down(self, downtime: Optional[float]) -> None:
"""
This function sets the node's prometheus interface as down. It sets the
time that the interface was initially down to the parameter 'downtime'
if it is not None, otherwise it sets it to the current timestamp.
:param downtime:
:return:
"""
if downtime is None:
self.set_went_down_at_prometheus(datetime.now().timestamp())
else:
self.set_went_down_at_prometheus(downtime)
def set_prometheus_as_up(self) -> None:
"""
This function sets a node's prometheus interface as up. A node's
interface is said to be up if went_down_at_prometheus is None.
:return: None
"""
self.set_went_down_at_prometheus(None)
def set_current_height(self, new_height: Optional[int]) -> None:
self._current_height = new_height
def set_total_block_headers_received(
self, new_total_block_headers_received: Optional[int]) -> None:
self._total_block_headers_received = new_total_block_headers_received
def set_max_pending_tx_delay(
self, new_max_pending_tx_delay: Optional[int]) -> None:
self._max_pending_tx_delay = new_max_pending_tx_delay
def set_process_start_time_seconds(
self, new_process_start_time_seconds: Optional[float]) -> None:
self._process_start_time_seconds = new_process_start_time_seconds
def set_total_gas_bumps(self, new_total_gas_bumps: Optional[int]) -> None:
self._total_gas_bumps = new_total_gas_bumps
def set_total_gas_bumps_exceeds_limit(
self, new_total_gas_bumps_exceeds_limit: Optional[int]) -> None:
self._total_gas_bumps_exceeds_limit = new_total_gas_bumps_exceeds_limit
def set_no_of_unconfirmed_txs(
self, new_no_of_unconfirmed_txs: Optional[int]) -> None:
self._no_of_unconfirmed_txs = new_no_of_unconfirmed_txs
def set_total_errored_job_runs(
self, new_total_errored_job_runs: Optional[int]) -> None:
self._total_errored_job_runs = new_total_errored_job_runs
def set_current_gas_price_info(self, new_percentile: Optional[float],
new_price: Optional[float]) -> None:
"""
This method sets the current_gas_price_info dict based on the new
percentile and price. This is done in this way to protect the Dict
schema.
:param new_percentile: The new percentile to be stored
:param new_price: The new gas to be stored
:return: None
"""
self._current_gas_price_info['percentile'] = new_percentile
self._current_gas_price_info['price'] = new_price
@staticmethod
def _new_eth_balance_info_valid(new_eth_balance_info: Dict) -> bool:
"""
This method checks that the new eth_balance_info dict obeys the required
schema.
:param new_eth_balance_info: The dict to check
:return: True if the dict obeys the required schema
: False otherwise
"""
schema = Schema(Or({
'address': str,
'balance': float,
'latest_usage': float,
}, {}))
return schema.is_valid(new_eth_balance_info)
def set_eth_balance_info(
self, new_eth_balance_info: Dict[str, Union[str, float]]) -> None:
"""
This method sets the new_eth_balance_info. It first checks that the new
dict obeys the required schema. If not, an InvalidDictSchemaException is
raised.
:param new_eth_balance_info: The new eth_balance_info to store.
:return: None
"""""
if self._new_eth_balance_info_valid(new_eth_balance_info):
self._eth_balance_info = new_eth_balance_info
else:
raise InvalidDictSchemaException('new_eth_balance_info')
def set_last_prometheus_source_used(
self, new_last_prometheus_source_used: Optional[str]) -> None:
self._last_prometheus_source_used = new_last_prometheus_source_used
def set_last_monitored_prometheus(
self, new_last_monitored_prometheus: Optional[float]) -> None:
self._last_monitored_prometheus = new_last_monitored_prometheus
def reset(self) -> None:
"""
This method resets all metrics to their initial state
:return: None
"""
self.set_went_down_at_prometheus(None)
self.set_current_height(None)
self.set_total_block_headers_received(None)
self.set_max_pending_tx_delay(None)
self.set_process_start_time_seconds(None)
self.set_total_gas_bumps(None)
self.set_total_gas_bumps_exceeds_limit(None)
self.set_no_of_unconfirmed_txs(None)
self.set_total_errored_job_runs(None)
self.set_current_gas_price_info(None, None)
self.set_eth_balance_info({})
self.set_last_prometheus_source_used(None)
self.set_last_monitored_prometheus(None)
| 37.407524
| 80
| 0.67636
| 1,466
| 11,933
| 5.090723
| 0.108458
| 0.079325
| 0.111483
| 0.037518
| 0.646791
| 0.404395
| 0.257939
| 0.223771
| 0.192952
| 0.171513
| 0
| 0
| 0.252661
| 11,933
| 318
| 81
| 37.525157
| 0.836847
| 0.185704
| 0
| 0.217617
| 0
| 0
| 0.039777
| 0.024369
| 0
| 0
| 0
| 0
| 0
| 1
| 0.217617
| false
| 0
| 0.025907
| 0.072539
| 0.378238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
37eb8f04b291f998b42a8e819f9ce512c5fabcfb
| 277
|
py
|
Python
|
code/doubanUtils.py
|
verazuo/douban_crawler
|
042e870c74df8b6f4eb1cd2af3b90d5b6699ab8f
|
[
"MIT"
] | 1
|
2021-04-03T12:41:29.000Z
|
2021-04-03T12:41:29.000Z
|
code/doubanUtils.py
|
verazuo/douban_crawler
|
042e870c74df8b6f4eb1cd2af3b90d5b6699ab8f
|
[
"MIT"
] | null | null | null |
code/doubanUtils.py
|
verazuo/douban_crawler
|
042e870c74df8b6f4eb1cd2af3b90d5b6699ab8f
|
[
"MIT"
] | null | null | null |
import requests
import re
from bs4 import BeautifulSoup
def nextPageLink(sess,soup,page,head=""):
NextPage=soup.find(class_='next').link.get('href')
req=sess.get(head + NextPage)
print(f'第{page}页:',req.status_code)
return BeautifulSoup(req.text,'html.parser')
| 27.7
| 54
| 0.722022
| 41
| 277
| 4.829268
| 0.731707
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004115
| 0.122744
| 277
| 10
| 55
| 27.7
| 0.8107
| 0
| 0
| 0
| 0
| 0
| 0.100719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.375
| 0
| 0.625
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
37ebf327c9046920009b45cecc899607501e8a37
| 2,530
|
py
|
Python
|
sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py
|
RAY-316/azure-sdk-for-python
|
4f7790deaf46c6f4e965f099f36eb73a7954ad5b
|
[
"MIT"
] | null | null | null |
sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py
|
RAY-316/azure-sdk-for-python
|
4f7790deaf46c6f4e965f099f36eb73a7954ad5b
|
[
"MIT"
] | null | null | null |
sdk/communication/azure-communication-phonenumbers/azure/communication/phonenumbers/_generated/models/__init__.py
|
RAY-316/azure-sdk-for-python
|
4f7790deaf46c6f4e965f099f36eb73a7954ad5b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AcquiredPhoneNumbers
from ._models_py3 import CommunicationError
from ._models_py3 import CommunicationErrorResponse
from ._models_py3 import PhoneNumberCapabilities
from ._models_py3 import PhoneNumberCapabilitiesRequest
from ._models_py3 import PhoneNumberCost
from ._models_py3 import PhoneNumberOperation
from ._models_py3 import PhoneNumberPurchaseRequest
from ._models_py3 import PhoneNumberSearchRequest
from ._models_py3 import PhoneNumberSearchResult
from ._models_py3 import PurchasedPhoneNumber
except (SyntaxError, ImportError):
from ._models import AcquiredPhoneNumbers # type: ignore
from ._models import CommunicationError # type: ignore
from ._models import CommunicationErrorResponse # type: ignore
from ._models import PhoneNumberCapabilities # type: ignore
from ._models import PhoneNumberCapabilitiesRequest # type: ignore
from ._models import PhoneNumberCost # type: ignore
from ._models import PhoneNumberOperation # type: ignore
from ._models import PhoneNumberPurchaseRequest # type: ignore
from ._models import PhoneNumberSearchRequest # type: ignore
from ._models import PhoneNumberSearchResult # type: ignore
from ._models import PurchasedPhoneNumber # type: ignore
from ._phone_numbers_client_enums import (
BillingFrequency,
PhoneNumberAssignmentType,
PhoneNumberCapabilityType,
PhoneNumberOperationStatus,
PhoneNumberOperationType,
PhoneNumberType,
)
__all__ = [
'AcquiredPhoneNumbers',
'CommunicationError',
'CommunicationErrorResponse',
'PhoneNumberCapabilities',
'PhoneNumberCapabilitiesRequest',
'PhoneNumberCost',
'PhoneNumberOperation',
'PhoneNumberPurchaseRequest',
'PhoneNumberSearchRequest',
'PhoneNumberSearchResult',
'PurchasedPhoneNumber',
'BillingFrequency',
'PhoneNumberAssignmentType',
'PhoneNumberCapabilityType',
'PhoneNumberOperationStatus',
'PhoneNumberOperationType',
'PhoneNumberType',
]
| 40.806452
| 94
| 0.729644
| 202
| 2,530
| 8.935644
| 0.351485
| 0.121884
| 0.079224
| 0.115789
| 0.289197
| 0.145152
| 0
| 0
| 0
| 0
| 0
| 0.005644
| 0.159684
| 2,530
| 61
| 95
| 41.47541
| 0.843368
| 0.235178
| 0
| 0
| 0
| 0
| 0.196242
| 0.131524
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.470588
| 0
| 0.470588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
5302a9b7f4d36ed1d8c39a9e74b3775344df1bd4
| 2,028
|
py
|
Python
|
HoursSelect.py
|
Maxahoy/ClassVolumeSilencer
|
9a05f9dd4efbbbddc74377a27027fa40b2167d02
|
[
"MIT"
] | null | null | null |
HoursSelect.py
|
Maxahoy/ClassVolumeSilencer
|
9a05f9dd4efbbbddc74377a27027fa40b2167d02
|
[
"MIT"
] | null | null | null |
HoursSelect.py
|
Maxahoy/ClassVolumeSilencer
|
9a05f9dd4efbbbddc74377a27027fa40b2167d02
|
[
"MIT"
] | null | null | null |
"""
This is how I'm gonna schedule hours
IDEA: import the format example file that I'm using and is saved in the same directory
"""
import csv
import pprint
from tkinter import *
from tkinter.filedialog import askopenfilename
import StringProcessing
def selectHoursFile():
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
print(filename)
return filename
"""
Receives a file location, opens the csv
The format looks like this:
CLASS STARTS,Class name (optional),MON,TUES,WED,THURS,FRI,,CLASS ENDS,MON,TUES,WED,THURS,FRI
1, Stats, 10:20:00 AM,,10:20:00 AM,,10:20:00 AM,,,11:15:00 AM,,11:15:00 AM,,11:15:00 AM
2,,,09:35:00 AM,,09:35:00 AM,,,,,10:55:00 AM,,10:55:00 AM,
3,,,11:30:00 AM,11:30:00 AM,11:30:00 AM,11:30:00 AM,,,,12:25:00 PM,12:25:00 PM,12:25:00 PM,12:25:00 PM
4,,,,,,09:10:00 AM,,,,,,,10:05:00 AM
5,,12:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,,,,04:30:00 PM,04:30:00 PM,04:30:00 PM,04:30:00 PM
6,,,,,,,,,,,,,
7,,,,,,,,,,,,,
8,,,,,,,,,,,,,
9,,,,,,,,,,,,,
10,,,,,,,,,,,,,
11,,,,,,,,,,,,,
12,,,,,,,,,,,,,
13,,,,,,,,,,,,,
14,,,,,,,,,,,,,
15,,,,,,,,,,,,,
"""
def interpretCSVFormat(csvFile):
#first open the file with the filepath
classList = dict()
with open(csvFile, "r") as csvOpen:
#next populate a temporary dictionary for the classes
tempDict = dict()
classID = 0
rowReader = csv.reader(csvOpen, delimiter=',', quotechar="'")
for row in rowReader:
#dictionary format: class ID::string of class days
classTimes = row
#print(row)
tempDict[classID] = str(classTimes)
classID = classID + 1
print(StringProcessing.lineList(str(classTimes)))
del tempDict[0]
pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(tempDict)
#TODO: make the sections using ClassScheduleStorage
| 28.971429
| 113
| 0.607988
| 318
| 2,028
| 3.877358
| 0.430818
| 0.051906
| 0.029197
| 0.025953
| 0.193025
| 0.163828
| 0.147607
| 0.147607
| 0.131387
| 0.131387
| 0
| 0.123833
| 0.207594
| 2,028
| 69
| 114
| 29.391304
| 0.643435
| 0.233728
| 0
| 0
| 0
| 0
| 0.003831
| 0
| 0
| 0
| 0
| 0.014493
| 0
| 1
| 0.086957
| false
| 0
| 0.217391
| 0
| 0.347826
| 0.173913
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
53066ef029d7bd3ef7be8bb9baad9578898d6c71
| 2,325
|
py
|
Python
|
projection.py
|
ogawan/nisa
|
d758e41e4983cc35477e81d944689b0226f00ef5
|
[
"MIT"
] | null | null | null |
projection.py
|
ogawan/nisa
|
d758e41e4983cc35477e81d944689b0226f00ef5
|
[
"MIT"
] | null | null | null |
projection.py
|
ogawan/nisa
|
d758e41e4983cc35477e81d944689b0226f00ef5
|
[
"MIT"
] | null | null | null |
from matplotlib import pyplot as plt
def nisa_projection(years=30, annual_deposit=80, initial_budget=100):
"""
This is a function to plot deposit of TSUMITATE NISA
Parameters:
---------------
years: integer
How many years are you going to continue?
annual_depoist: integer
Annual deposit into the NISA account.
initial_budget: integer
The initial budget.
Returns:
--------------
matplotlib figure
"""
for j in [1.00,1.01, 1.02, 1.03, 1.04, 1.05]:
original = initial_budget
ganbon = []
box = []
for i in range(0,years):
if i == 0:
box.append(original)
ganbon.append(original)
gan = ganbon[-1] + annual_deposit
original = original * j + annual_deposit
if i > 0:
box.append(original)
ganbon.append(gan)
plt.scatter(list(range(0,years)), box)
plt.legend(["0%", "1%", "2%", "3%", "4%", "5%"])
plt.xlabel("Years")
plt.ylabel("Money (Man yen)")
# Reference: https://plotly.com/python/figure-labels/
import pandas as pd
import plotly.graph_objects as go
def nisa_projection_plotly(years=30, annual_deposit=80, initial_budget=100):
"""
This is a function to plot deposit of TSUMITATE NISA
Parameters:
---------------
years: integer
How many years are you going to continue?
annual_depoist: integer
Annual deposit into the NISA account.
initial_budget: integer
The initial budget.
Returns:
--------------
plotly figures.
"""
dic_ = {}
for j in [1.00,1.01, 1.02, 1.03, 1.04, 1.05]:
original = initial_budget
ganbon = []
box = []
for i in range(0,years):
if i == 0:
box.append(original)
ganbon.append(original)
gan = ganbon[-1] + annual_deposit
original = original * j + annual_deposit
if i > 0:
box.append(original)
ganbon.append(gan)
dic_["{} %".format(str(j)[-1])] = box
df = pd.DataFrame(dic_)
fig = go.Figure()
for i in df.columns:
fig.add_trace(go.Scatter(x=df.index, y=df[i],name=i))
fig.update_layout(
title="NISA PLOT",
xaxis_title="Years",
yaxis_title="Man Yen",
width=500,
height=400,
)
fig.show()
nisa_projection(30, 80, 100)
nisa_projection_plotly(30, 80, 100)
| 21.728972
| 76
| 0.594409
| 317
| 2,325
| 4.271293
| 0.33123
| 0.076809
| 0.011817
| 0.020679
| 0.636632
| 0.636632
| 0.636632
| 0.636632
| 0.636632
| 0.636632
| 0
| 0.050058
| 0.261075
| 2,325
| 106
| 77
| 21.933962
| 0.738068
| 0.287742
| 0
| 0.509804
| 0
| 0
| 0.036008
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.058824
| 0
| 0.098039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
53104f17c4720a21e638155abf65cadc6cce2788
| 24,765
|
py
|
Python
|
src/poretitioner/utils/filtering.py
|
uwmisl/poretitioner
|
0ff9f67a3b25fdcb460b11c970b2ed366da07da7
|
[
"MIT"
] | 2
|
2021-03-11T21:27:16.000Z
|
2021-03-18T00:58:22.000Z
|
src/poretitioner/utils/filtering.py
|
uwmisl/poretitioner
|
0ff9f67a3b25fdcb460b11c970b2ed366da07da7
|
[
"MIT"
] | 12
|
2021-02-19T19:36:05.000Z
|
2021-03-24T15:38:02.000Z
|
src/poretitioner/utils/filtering.py
|
uwmisl/poretitioner
|
0ff9f67a3b25fdcb460b11c970b2ed366da07da7
|
[
"MIT"
] | null | null | null |
"""
=========
filtering.py
=========
This module provides more granular filtering for captures.
You can customize your own filters too.
"""
from __future__ import annotations
import re
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass
from json import JSONEncoder
from pathlib import PosixPath
from typing import (
Any,
Dict,
Iterable,
Mapping,
NewType,
Optional,
Protocol,
Type,
TypedDict,
Union,
)
import h5py
import numpy as np
from h5py import File as Fast5File
from ..hdf5 import (
HasFast5,
HDF5_Group,
HDF5_GroupSerialableDataclass,
HDF5_GroupSerializable,
HDF5_GroupSerializing,
IsAttr,
)
from ..logger import Logger, getLogger
from ..signals import Capture
from .core import NumpyArrayLike, PathLikeOrString, ReadId, stripped_by_keys
from .plugin import Plugin
CaptureOrTimeSeries = Union[Capture, NumpyArrayLike]
# Unique identifier for a collection of filters (e.g. "ProfJeffsAwesomeFilters")
FilterSetId = NewType("FilterSetId", str)
# Unique identifier for an individual filter (e.g. "min_frac")
FilterName = NewType("FilterName", str)
__all__ = [
"does_pass_filters",
"get_filters",
"FilterName",
"FilterSetId",
"FilterConfig",
"Filter",
"Filters",
"DEFAULT_FILTER_PLUGINS",
"FilterSet",
"FilterConfigs",
"FilterPlugin",
"PATH",
]
@dataclass(frozen=True)
class FILTER_PATH:
ROOT = f"/Filter/"
@classmethod
def filter_set_path(cls, filter_set_id: FilterSetId) -> str:
filter_path = str(PosixPath(FILTER_PATH.ROOT, filter_set_id))
return filter_path
@classmethod
def filter_set_pass_path(cls, filter_set_id: FilterSetId) -> str:
pass_path = str(PosixPath(FILTER_PATH.filter_set_path(filter_set_id), "pass"))
return pass_path
@classmethod
def filter_set_pass_path_for_read_id(cls, filter_set_id: FilterSetId, read_id: ReadId) -> str:
pass_path = str(PosixPath(FILTER_PATH.filter_set_pass_path(filter_set_id), read_id))
return pass_path
class FilterConfig(TypedDict):
"""A blueprint for how to construct a FilterPlugin.
Contains a name, and any number of other attributes
Note on terminology:
- FilterConfig: A high-level description of a filter.
- FilterPlugin: An actual, callable, implementation of a FilterConfig.
For custom plugins, make sure "filepath" is an attribute that points to the file to laod
"""
# Mapping of a FilterName to filter configurations.
FilterConfigs = NewType("FilterConfigs", Dict[FilterName, FilterConfig])
# TODO: Filter Plugin should check that name is unique. https://github.com/uwmisl/poretitioner/issues/91
class FilterPlugin(Plugin):
"""
Abstract class for Filter plugins. To write your own filter, subclass this abstract
class and implement the `apply` method and `name` property.
"""
@classmethod
@abstractmethod
def name(cls) -> str:
"""Unique name for this filter.
Make sure it doesn't conflict with any existing names.
Returns
-------
str
The unique name for this filter (e.g. "fourier_transform").
Raises
------
NotImplementedError
Raised if this filter is called without this name method being implemented.
"""
raise NotImplementedError(
"'name' class method not implemented for filter. This class method should return a unique name for this filter."
)
@abstractmethod
def apply(self, capture: CaptureOrTimeSeries) -> bool:
"""Returns True if a capture passes a given filter criteria.
For instance, a range filter would check that a capture's summary statistsics lie within a given range.
Parameters
----------
capture : np.typing.ArrayLike
Time series capture to filter.
Returns
-------
bool
Whether this capture passes the filter.
Raises
------
NotImplementedError
Raised when the filter method isn't implemented by the consuming Filter class
"""
raise NotImplementedError(
"'apply' method not implemented for filter. This method should return True if and only if applied to a capture that meets the filter criterion. For instance, "
)
def __call__(self, capture: CaptureOrTimeSeries) -> bool:
"""Apply the filter.
Defining `__call__` lets us do nice things like:
class MyCustomFilter(FilterPlugin):
def apply(capture):
# ...
pass
# Later in code where filtering is done....
valid_captures = []
filters = [ MyCustomFilter(), AnotherCustomFilter(), ... ]
valid_captures = [capture for capture in captures if all([filt(capture) for filt in filters])]
for capture in captures: # You'd want to parallelize this in a real life example...
for filt in filters:
filtered_captures = filt(capture).
Parameters
----------
capture : CaptureOrTimeSeries
Capture to filter.
Returns
-------
bool
Whether this capture passes the filter.
"""
result = self.apply(capture)
return result
RANGE_FILTER_DEFAULT_MINIMUM: float = -np.inf
RANGE_FILTER_DEFAULT_MAXIMUM: float = np.inf
class RangeFilter(FilterPlugin):
def __init__(self, minimum: Optional[float] = None, maximum: Optional[float] = None):
"""A filter that filters based on whether a signal falls between a maximum and a minimum.
Parameters
----------
minimum : float, optional
The smallest value this signal should be allowed to take (inclusive), by default RangeFilter.DEFAULT_MINIMUM
maximum : float, optional
The largest value this signal should be allowed to take (inclusive), by default RangeFilter.DEFAULT_MAXIMUM
"""
self.minimum = minimum if minimum is not None else RANGE_FILTER_DEFAULT_MINIMUM
self.maximum = maximum if maximum is not None else RANGE_FILTER_DEFAULT_MAXIMUM
def extract(self, capture: CaptureOrTimeSeries) -> NumpyArrayLike:
"""Extracts a summary statistic from the capture (e.g. mean, length, standard deviation).
Identity operation by default (just returns the capture).
You can use this function to transform the data in a useful way before processing it (e.g.
getting the mean value of a capture before filtering based on that mean.)
Note: If we picture the filtering workflow as an ETL (Extract-Transform-Load) pipeline, this would be the "transform"
(take data, modify it for a later purpose), but I feel that "transform" is perhaps a misleading function name in this context.
Parameters
----------
capture : CaptureOrTimeSeries
Capture from which to extract data.
"""
try:
signal = capture.fractionalized()
except AttributeError:
signal = capture
else:
signal = capture
return signal
# signal = getattr(capture, Capture.fractionalized.__name__, capture)
def is_in_range(self, value: Union[NumpyArrayLike, float]) -> bool:
try:
# If the value is just a float, we can use this handy syntax:
return self.minimum <= value <= self.maximum
except ValueError:
# But we're not allowed to use that syntax on numpy arrays.
return all(np.logical_and(self.minimum <= value, value <= self.maximum))
def apply(self, signal):
value = self.extract(signal)
return self.is_in_range(value)
class StandardDeviationFilter(RangeFilter):
"""Filters for captures with standard deviations in some range."""
@classmethod
def name(cls) -> str:
return "stdv"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.std(signal)
class MeanFilter(RangeFilter):
"""Filters for captures with an arithmetic mean within a range."""
@classmethod
def name(cls) -> str:
return "mean"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.mean(signal)
class MedianFilter(RangeFilter):
"""Filters for captures with a median within a range."""
@classmethod
def name(cls) -> str:
return "median"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.median(signal)
class MinimumFilter(RangeFilter):
"""Filters for captures with a minimum within a range."""
@classmethod
def name(cls) -> str:
return "min"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.min(signal)
class MaximumFilter(RangeFilter):
"""Filters for captures with a maximum within a range."""
@classmethod
def name(cls) -> str:
return "max"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return np.max(signal)
class LengthFilter(RangeFilter):
"""Filters captures based on their length."""
@classmethod
def name(cls) -> str:
return "length"
def extract(self, capture: CaptureOrTimeSeries):
signal = super().extract(capture)
return len(signal)
class EjectedFilter(FilterPlugin):
"""Filters captures based on whether they were ejected from the pore."""
@classmethod
def name(cls) -> str:
return "ejected"
def extract(self, capture: Capture):
return capture.ejected
"""
How to Create Your Own Custom Filter:
Need more advanced filtering than what we provide out of the box? No problem.
Create your own custom filter by inheriting from the FilterPlugin class.
For this example, let's do something complex. Say you only want to examine captures
that have more than 5 samples with a hyperbolic tangent greater than some threshold.
That means our custom filter's `apply` function should return True if and only if
the signal has more than 5 samples greater than the threshold, after taking the hyperbolic tangent in `extract`.
"""
class MyCustomFilter(FilterPlugin):
threshold: float = 0.5 # Totally arbitrary.
def name(self):
return "foo"
def extract(self, capture):
# Do the transformations here, or pre-process it before the filter.
# Gets the hyperbolic tangent of the signal.
extracted = np.tanh(capture.signal)
return extracted
def apply(self, signal):
# Only return true if more than 5 samples have a square root greater than 2.0 (arbitrary)
extracted = self.extract(signal)
# If we want to filter out signals with fewer than 5 matching samples, then we
# should retrun True when there are 5 or more matching samples.
n_meeting_threshold = len(
extracted[extracted > self.threshold]
) # Number of samples greater than the threshold
meets_criteria = (
n_meeting_threshold >= 5
) # Are there at least 5 samples meeting this threshold?
return meets_criteria
def apply_feature_filters(capture: CaptureOrTimeSeries, filters: List[FilterPlugin]) -> bool:
"""
Check whether an array of current values (i.e. a single nanopore capture)
passes a set of filters. Filters can be based on summary statistics
(e.g., mean) and/or a range of allowed values.
Notes on filter behavior: If the filters list is empty, there are no filters
and the capture passes.
Parameters
----------
capture : CaptureOrTimeSeries | NumpyArrayLike
Capture containing time series of nanopore current values for a single capture, or the signal itself.
filters : List[FilterPlugin]
List of FilterPlugin instances. Write your own filter by subclassing FilterPlugin.
Returns
-------
boolean
True if capture passes all filters; False otherwise.
"""
if filters is None:
filters = []
# TODO: Parallelize? https://github.com/uwmisl/poretitioner/issues/67
filtered = [filter_out(capture) for filter_out in filters]
print(filtered)
# Did this signal pass all filters?
all_passed = all(filtered)
return all_passed
def check_capture_ejection_by_read(f5, read_id):
"""Checks whether the current capture was in the pore until the voltage
was reversed.
Parameters
----------
f5 : h5py.File object (open for reading or more)
Capture fast5 file
read_id : TODO
Returns
-------
boolean
True if the end of the capture coincides with the end of a voltage window.
"""
try:
ejected = f5.get(f"/read_{read_id}/Signal").attrs["ejected"]
except AttributeError:
raise ValueError(f"path /read_{read_id} does not exist in the fast5 file.")
return ejected
def check_capture_ejection(end_capture, voltage_ends, tol_obs=20):
"""Checks whether the current capture was in the pore until the voltage
was reversed.
Essentially checks whether a value (end_capture) is close enough (within
a margin of tol_obs) to any value in voltage_ends.
Parameters
----------
end_capture : numeric
The end time of the capture.
voltage_ends : list of numeric
List of times when the standard voltage ends.
tol_obs : int, optional
Tolerance for defining when the end of the capture = voltage end, by default 20
Returns
-------
boolean
True if the end of the capture coincides with the end of a voltage window.
"""
for voltage_end in voltage_ends:
if np.abs(end_capture - voltage_end) < tol_obs:
return True
return False
def filter_like_existing(config, example_fast5, example_filter_path, fast5_files, new_filter_path):
# Filters a set of fast5 files exactly the same as an existing filter
# TODO : #68 : implement
raise NotImplementedError()
def get_filter_pass_path(filter_set_id, read_id):
return FILTER_PATH.filter_set_pass_path(filter_set_id)
__DEFAULT_FILTER_PLUGINS = [
MeanFilter,
StandardDeviationFilter,
MedianFilter,
MinimumFilter,
MaximumFilter,
LengthFilter,
]
DEFAULT_FILTER_PLUGINS = {
filter_plugin_class.name(): filter_plugin_class
for filter_plugin_class in __DEFAULT_FILTER_PLUGINS
}
class Filtering(Protocol):
"""Classes that adhere to the Filtering protocol
provide an 'apply' method to an input that returns True
if and only if the input passes its filter.
These are also callable, so calling a filter on an input
is functionally equivalent to calling its apply method.
"""
def __call__(self, *args, **kwargs) -> bool:
raise NotImplementedError("Filtering protocol hasn't implemented __call__ yet!")
def apply(self, *args, **kwargs) -> bool:
raise NotImplementedError("Filtering protocol hasn't implemented Apply yet!")
@dataclass
class Filter(Filtering):
"""A named filter that can be applied to some data.
You can use this filter by just calling it on some data.
my_signal = [1,2,3,4]
filter = Filter(...)
passed_filter: bool = filter(my_signal)
Parameters
----------
config : FilterConfig
A description of this filter's configuration (e.g. where it was loaded from).
plugin : FilterPlugin
The actual implementation of this filter.
We have this class defined with
"""
config: FilterConfig
plugin: FilterPlugin
def __call__(self, *args, **kwargs) -> bool:
return self.plugin(*args, **kwargs)
def apply(self, *args, **kwargs) -> bool:
return self.plugin.apply(*args, **kwargs)
@property
def name(self) -> FilterName:
return FilterName(self.plugin.__class__.name())
def as_attr(self) -> Dict[str, Any]:
name = self.name
attrs = {**vars(self.config), **vars(self.plugin), name: name}
return attrs
def from_attr(self, attr) -> IsAttr:
...
import json
@dataclass
class HDF5_FilterSerialable(Filter, HDF5_GroupSerialableDataclass):
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
log = log if log is not None else getLogger()
# Note: This line simply registers a group with the name 'name' in the parent group.
this_group = HDF5_Group(parent_group.require_group(self.name))
all_attrs = {**self.config, **vars(self.plugin)}
this_group.create_attrs(all_attrs)
# Implementers must now write their serialized instance to this group.
return this_group
@classmethod
def from_group(
cls, group: HDF5_Group, log: Optional[Logger] = None
) -> HDF5_GroupSerialableDataclass:
# You see, the trouble is, in the above 'as_group' call, we lumped together
# all the attributes of the FilterConfig and the FilterPlugin, not knowing
# which attributes belonged to which class.
#
# Now, here in `from_group`, it's time to pay the piper and figure out which attribute
# goes where to create a new Filter instance.
#
# This is likely achievable through the plugin architecture, since the plugin's
# name is unique, we can try to find a plugin with a given name, then get its attributes from there.
# Load
log.warning("Filter.from_group not implemented...It's a whole thing (see comment)")
# This is pure Hail Mary.
return super().from_group(group, log)
# class Filters(HDF5_GroupSerialableDataclass):
# filters:
Filters = Dict[FilterName, Filter]
def get_filters(filter_configs: Optional[FilterConfigs] = None) -> Filters:
"""Creates Filters from a list of filter configurations.
Parameters
----------
filter_configs : Optional[FilterConfigs]
A mapping of filter names to their configurations, None by default (i.e. no filtering).
Returns
-------
Filters
A set of callable/applyable filters.
"""
filter_configs = filter_configs if filter_configs is not None else FilterConfigs({})
my_filters = {
name: filter_from_config(name, filter_config)
for name, filter_config in filter_configs.items()
}
return my_filters
def does_pass_filters(capture: CaptureOrTimeSeries, filters: Iterable[Filter]) -> bool:
"""
Check whether an array of values (e.g. a single nanopore capture)
passes a set of filters. Filters can be based on summary statistics
(e.g., mean) and/or a range of allowed values.
Parameters
----------
capture : CaptureOrTimeSeries | NumpyArrayLike
Capture containing time series of nanopore current values for a single capture, or the signal itself.
filters : Iterable[Filter]
The set of filters to apply. Write your own filter by subclassing FilterPlugin.
Returns
-------
boolean
True if capture passes all filters; False otherwise.
"""
all_passed = True
for some_filter in filters:
if not some_filter(capture):
return False
return all_passed
@dataclass(frozen=True)
class FilterSetProtocol(Filtering, Protocol):
filter_set_id: FilterSetId
filters: Filters
@classmethod
def from_filter_configs(cls, name: FilterSetId, filter_configs: FilterConfigs = None):
...
@dataclass(frozen=True, init=False)
class FilterSet(FilterSetProtocol):
"""
A collection of filters with a name for easy
identification. Essentially a mapping of filter names to their implementations.
"""
def validate(self):
raise NotImplementedError("Implement validation for filters!")
def __init__(self, filter_set_id: FilterSetId, filters: Filters) -> None:
filterset = super().__init__(self)
object.__setattr__(self, "filter_set_id", filter_set_id)
object.__setattr__(self, "filters", filters)
# self.name = name
# self.filters = filters
############################
#
# FilterSetProtocol
#
############################
@classmethod
def from_filter_configs(cls, name: FilterSetId, filter_configs: FilterConfigs = None):
filters: Filters = get_filters(filter_configs)
filter_set = cls.__new__(cls, name, filters)
filter_set.__init__(name, filters)
return filter_set
def apply(self, capture: CaptureOrTimeSeries) -> bool:
return does_pass_filters(capture, self.filters.values())
def __call__(self, capture: CaptureOrTimeSeries) -> bool:
return self.apply(capture)
class HDF5_FilterSet(FilterSet, HDF5_GroupSerialableDataclass):
def __init__(self, filter_set: FilterSet) -> None:
self._filterset = filter_set
############################
#
# HDF5_GroupSerializable
#
############################
def name(self):
return self._filterset.filter_set_id
def as_group(self, parent_group: HDF5_Group, log: Optional[Logger] = None) -> HDF5_Group:
filter_set_group = parent_group.require_group(self.name())
for name, filter_t in self._filterset.filters.items():
hdf5_filter = HDF5_FilterSerialable(filter_t.config, filter_t.plugin)
hdf5_filter.as_group(filter_set_group)
return HDF5_Group(filter_set_group)
# @classmethod
# def from_group(
# cls, group: HDF5_Group, log: Optional[Logger] = None
# ) -> HDF5_GroupSerializable:
# raise NotImplementedError(
# f"from_group not implemented for {cls.__name__}. Make sure you write a method that returns a serialzied version of this object."
# )
def filter_from_config(name: str, config: FilterConfig, log: Logger = getLogger()) -> Filter:
"""Creates a Filter from a config spefication. If no "filename" is present in the FilterConfig, it's
assumed to be one of the default filtesr
Parameters
----------
name : str
The unique name of a filter.
config : FilterConfig
Filter configuration to build the plugin.
log : Logger, optional
Logger to use for information/warnings/debug, by default getLogger()
Returns
-------
Filter
A filter that can be applied to some data.
Raises
------
AttributeError
A filter plugin could not be built from the configuration description. If this error is raised, be sure to check
1) A plugin class with the name in the configuration is defined at the filepath described in the configuration
2) The plugin class inherits from the `FilterPlugin` abstract base class.
"""
filepath = config.get("filepath", None)
# TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
plugin = None
if name in DEFAULT_FILTER_PLUGINS:
plugin = DEFAULT_FILTER_PLUGINS[name]()
else:
# TODO: For non-default FilterPlugins, load the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
plugin = plugin_from_file(name, filepath)
pass
# Make sure any plugin attributes defined in the config are moved over to the plugin instance.
try:
# Here, we take care of setting whatever attributes the plugin config defines on the new plugin instance.
for key, value in config.items():
object.__setattr__(plugin, key, value)
except AttributeError as e:
log.warning(
"""
Uh oh, couldn't find plugin '{name}'. Are you sure:
1) A plugin class with the name '{name}' is defined in the file {filepath}?
2) That plugin class inherits from `FilterPlugin`?
"""
)
raise e
my_filter = Filter(config, plugin)
return my_filter
def plugin_from_file(name: str, filepath: PathLikeOrString):
"""[summary]
Parameters
----------
name : str
[description]
filepath : PathLikeOrString
[description]
Returns
-------
[type]
[description]
Raises
------
NotImplementedError
[description]
"""
# TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91
raise NotImplementedError(
"Plugin from file has not been implemented! This method should take in a filepath and filter name, and return a runnable FilterPlugin!"
)
| 31.190176
| 171
| 0.666828
| 3,036
| 24,765
| 5.326416
| 0.170619
| 0.015027
| 0.008843
| 0.011688
| 0.286315
| 0.257127
| 0.217612
| 0.182858
| 0.176241
| 0.157133
| 0
| 0.003861
| 0.247083
| 24,765
| 793
| 172
| 31.229508
| 0.863402
| 0.42023
| 0
| 0.248387
| 0
| 0.009677
| 0.075717
| 0.003594
| 0
| 0
| 0
| 0.007566
| 0
| 1
| 0.174194
| false
| 0.051613
| 0.051613
| 0.051613
| 0.448387
| 0.003226
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 1
|
5319f9beb8c0372d2483c2292e3473295821dc00
| 12,467
|
py
|
Python
|
libs/PureCloudPlatformClientV2/models/management_unit.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 1
|
2021-10-08T20:46:45.000Z
|
2021-10-08T20:46:45.000Z
|
libs/PureCloudPlatformClientV2/models/management_unit.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | null | null | null |
libs/PureCloudPlatformClientV2/models/management_unit.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ManagementUnit(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ManagementUnit - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str',
'name': 'str',
'division': 'Division',
'business_unit': 'BusinessUnitReference',
'start_day_of_week': 'str',
'time_zone': 'str',
'settings': 'ManagementUnitSettingsResponse',
'metadata': 'WfmVersionedEntityMetadata',
'version': 'int',
'date_modified': 'datetime',
'modified_by': 'UserReference',
'self_uri': 'str'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'division': 'division',
'business_unit': 'businessUnit',
'start_day_of_week': 'startDayOfWeek',
'time_zone': 'timeZone',
'settings': 'settings',
'metadata': 'metadata',
'version': 'version',
'date_modified': 'dateModified',
'modified_by': 'modifiedBy',
'self_uri': 'selfUri'
}
self._id = None
self._name = None
self._division = None
self._business_unit = None
self._start_day_of_week = None
self._time_zone = None
self._settings = None
self._metadata = None
self._version = None
self._date_modified = None
self._modified_by = None
self._self_uri = None
@property
def id(self):
"""
Gets the id of this ManagementUnit.
The globally unique identifier for the object.
:return: The id of this ManagementUnit.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ManagementUnit.
The globally unique identifier for the object.
:param id: The id of this ManagementUnit.
:type: str
"""
self._id = id
@property
def name(self):
"""
Gets the name of this ManagementUnit.
:return: The name of this ManagementUnit.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ManagementUnit.
:param name: The name of this ManagementUnit.
:type: str
"""
self._name = name
@property
def division(self):
"""
Gets the division of this ManagementUnit.
The division to which this entity belongs.
:return: The division of this ManagementUnit.
:rtype: Division
"""
return self._division
@division.setter
def division(self, division):
"""
Sets the division of this ManagementUnit.
The division to which this entity belongs.
:param division: The division of this ManagementUnit.
:type: Division
"""
self._division = division
@property
def business_unit(self):
"""
Gets the business_unit of this ManagementUnit.
The business unit to which this management unit belongs
:return: The business_unit of this ManagementUnit.
:rtype: BusinessUnitReference
"""
return self._business_unit
@business_unit.setter
def business_unit(self, business_unit):
"""
Sets the business_unit of this ManagementUnit.
The business unit to which this management unit belongs
:param business_unit: The business_unit of this ManagementUnit.
:type: BusinessUnitReference
"""
self._business_unit = business_unit
@property
def start_day_of_week(self):
"""
Gets the start_day_of_week of this ManagementUnit.
Start day of week for scheduling and forecasting purposes. Moving to Business Unit
:return: The start_day_of_week of this ManagementUnit.
:rtype: str
"""
return self._start_day_of_week
@start_day_of_week.setter
def start_day_of_week(self, start_day_of_week):
"""
Sets the start_day_of_week of this ManagementUnit.
Start day of week for scheduling and forecasting purposes. Moving to Business Unit
:param start_day_of_week: The start_day_of_week of this ManagementUnit.
:type: str
"""
allowed_values = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
if start_day_of_week.lower() not in map(str.lower, allowed_values):
# print("Invalid value for start_day_of_week -> " + start_day_of_week)
self._start_day_of_week = "outdated_sdk_version"
else:
self._start_day_of_week = start_day_of_week
@property
def time_zone(self):
"""
Gets the time_zone of this ManagementUnit.
The time zone for the management unit in standard Olson format. Moving to Business Unit
:return: The time_zone of this ManagementUnit.
:rtype: str
"""
return self._time_zone
@time_zone.setter
def time_zone(self, time_zone):
"""
Sets the time_zone of this ManagementUnit.
The time zone for the management unit in standard Olson format. Moving to Business Unit
:param time_zone: The time_zone of this ManagementUnit.
:type: str
"""
self._time_zone = time_zone
@property
def settings(self):
"""
Gets the settings of this ManagementUnit.
The configuration settings for this management unit
:return: The settings of this ManagementUnit.
:rtype: ManagementUnitSettingsResponse
"""
return self._settings
@settings.setter
def settings(self, settings):
"""
Sets the settings of this ManagementUnit.
The configuration settings for this management unit
:param settings: The settings of this ManagementUnit.
:type: ManagementUnitSettingsResponse
"""
self._settings = settings
@property
def metadata(self):
"""
Gets the metadata of this ManagementUnit.
Version info metadata for this management unit. Deprecated, use settings.metadata
:return: The metadata of this ManagementUnit.
:rtype: WfmVersionedEntityMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this ManagementUnit.
Version info metadata for this management unit. Deprecated, use settings.metadata
:param metadata: The metadata of this ManagementUnit.
:type: WfmVersionedEntityMetadata
"""
self._metadata = metadata
@property
def version(self):
"""
Gets the version of this ManagementUnit.
The version of the underlying entity. Deprecated, use field from settings.metadata instead
:return: The version of this ManagementUnit.
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this ManagementUnit.
The version of the underlying entity. Deprecated, use field from settings.metadata instead
:param version: The version of this ManagementUnit.
:type: int
"""
self._version = version
@property
def date_modified(self):
"""
Gets the date_modified of this ManagementUnit.
The date and time at which this entity was last modified. Deprecated, use field from settings.metadata instead. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:return: The date_modified of this ManagementUnit.
:rtype: datetime
"""
return self._date_modified
@date_modified.setter
def date_modified(self, date_modified):
"""
Sets the date_modified of this ManagementUnit.
The date and time at which this entity was last modified. Deprecated, use field from settings.metadata instead. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z
:param date_modified: The date_modified of this ManagementUnit.
:type: datetime
"""
self._date_modified = date_modified
@property
def modified_by(self):
"""
Gets the modified_by of this ManagementUnit.
The user who last modified this entity. Deprecated, use field from settings.metadata instead
:return: The modified_by of this ManagementUnit.
:rtype: UserReference
"""
return self._modified_by
@modified_by.setter
def modified_by(self, modified_by):
"""
Sets the modified_by of this ManagementUnit.
The user who last modified this entity. Deprecated, use field from settings.metadata instead
:param modified_by: The modified_by of this ManagementUnit.
:type: UserReference
"""
self._modified_by = modified_by
@property
def self_uri(self):
"""
Gets the self_uri of this ManagementUnit.
The URI for this object
:return: The self_uri of this ManagementUnit.
:rtype: str
"""
return self._self_uri
@self_uri.setter
def self_uri(self, self_uri):
"""
Sets the self_uri of this ManagementUnit.
The URI for this object
:param self_uri: The self_uri of this ManagementUnit.
:type: str
"""
self._self_uri = self_uri
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 29.612827
| 208
| 0.599422
| 1,410
| 12,467
| 5.146099
| 0.158156
| 0.039691
| 0.132304
| 0.040518
| 0.501792
| 0.422409
| 0.366593
| 0.343716
| 0.321665
| 0.293275
| 0
| 0.002374
| 0.324216
| 12,467
| 420
| 209
| 29.683333
| 0.858872
| 0.466111
| 0
| 0.104575
| 0
| 0
| 0.099775
| 0.014468
| 0
| 0
| 0
| 0
| 0
| 1
| 0.202614
| false
| 0
| 0.03268
| 0
| 0.359477
| 0.006536
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
532376f847c7965500c6f9f56d9f6308f976ea4f
| 1,599
|
py
|
Python
|
tests/unit/test_BaseDirection.py
|
vpalex999/project-mars
|
6e21c5acfe6105a7b7c87a79770e7420bda46f26
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_BaseDirection.py
|
vpalex999/project-mars
|
6e21c5acfe6105a7b7c87a79770e7420bda46f26
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_BaseDirection.py
|
vpalex999/project-mars
|
6e21c5acfe6105a7b7c87a79770e7420bda46f26
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import src.constants as cnst
from src.directions import BaseDirection
@pytest.fixture
def base_direction():
return BaseDirection()
def test_init_BaseDirection(base_direction):
assert isinstance(base_direction, BaseDirection)
def test_current_direction_is(base_direction):
assert base_direction.current == cnst.NORTH
@pytest.mark.parametrize(["turn_func", "expected_direction"], [
# turn_left
(lambda f: f.turn_left(), cnst.WEST),
(lambda f: f.turn_left().turn_left(), cnst.SOUTH),
(lambda f: f.turn_left().turn_left().turn_left(), cnst.EAST),
(lambda f: f.turn_left().turn_left().turn_left().turn_left(), cnst.NORTH),
(lambda f: f.turn_left().turn_left().turn_left().turn_left().turn_left(), cnst.WEST),
# turn_right()
(lambda f: f.turn_right(), cnst.EAST),
(lambda f: f.turn_right().turn_right(), cnst.SOUTH),
(lambda f: f.turn_right().turn_right().turn_right(), cnst.WEST),
(lambda f: f.turn_right().turn_right().turn_right().turn_right(), cnst.NORTH),
(lambda f: f.turn_right().turn_right().turn_right().turn_right().turn_right(), cnst.EAST),
# any combinations
(lambda f: f.turn_left().turn_right(), cnst.NORTH),
(lambda f: f.turn_left().turn_left().turn_right(), cnst.WEST),
(lambda f: f.turn_left().turn_right().turn_left(), cnst.WEST),
(lambda f: f.turn_left().turn_right().turn_left().turn_right().turn_right(), cnst.EAST),
]
)
def test_turn_direction(base_direction, turn_func, expected_direction):
turn_func(base_direction)
assert base_direction.current == expected_direction
| 35.533333
| 94
| 0.707942
| 232
| 1,599
| 4.594828
| 0.150862
| 0.172608
| 0.180113
| 0.157599
| 0.681051
| 0.603189
| 0.473734
| 0.426829
| 0.401501
| 0.311445
| 0
| 0
| 0.126329
| 1,599
| 44
| 95
| 36.340909
| 0.763064
| 0.02439
| 0
| 0
| 0
| 0
| 0.017352
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.133333
| false
| 0
| 0.1
| 0.033333
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
53288d2b29e82fc8c4f0e83a7806673cbfd64265
| 538
|
py
|
Python
|
dont_worry.py
|
karianjahi/fahrer_minijob
|
020a9de27b77f8e0bcdec198a37cfb7f1d4736ed
|
[
"MIT"
] | null | null | null |
dont_worry.py
|
karianjahi/fahrer_minijob
|
020a9de27b77f8e0bcdec198a37cfb7f1d4736ed
|
[
"MIT"
] | null | null | null |
dont_worry.py
|
karianjahi/fahrer_minijob
|
020a9de27b77f8e0bcdec198a37cfb7f1d4736ed
|
[
"MIT"
] | null | null | null |
class Hey:
def __init__(jose, name="mours"):
jose.name = name
def get_name(jose):
return jose.name
class Person(object):
def __init__(self, name, phone):
self.name = name
self.phone = phone
class Teenager(Person):
def __init__(self, *args, **kwargs):
self.website = kwargs.pop("website")
super(Teenager, self).__init__(*args, **kwargs)
if __name__ == "__main__":
#print(Hey().get_name())
teen = Teenager("Joseph Njeri", 924, "www.fowr.gd")
print(teen.website)
| 26.9
| 55
| 0.615242
| 68
| 538
| 4.485294
| 0.426471
| 0.068852
| 0.072131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007282
| 0.234201
| 538
| 20
| 56
| 26.9
| 0.73301
| 0.042751
| 0
| 0
| 0
| 0
| 0.083495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.0625
| 0.5
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
53315defe5a40f6e5f9bc740259ebb1dfe1b3225
| 3,515
|
py
|
Python
|
__init__.py
|
NeonJarbas/skill-ddg
|
48476ad650e72f68ee7e96dd92c6d18f841ce6ec
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
NeonJarbas/skill-ddg
|
48476ad650e72f68ee7e96dd92c6d18f841ce6ec
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
NeonJarbas/skill-ddg
|
48476ad650e72f68ee7e96dd92c6d18f841ce6ec
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ovos_utils.gui import can_use_gui
from adapt.intent import IntentBuilder
from mycroft.skills.common_query_skill import CommonQuerySkill, CQSMatchLevel
from mycroft.skills.core import intent_handler
from neon_solver_ddg_plugin import DDGSolver
class DuckDuckGoSkill(CommonQuerySkill):
def __init__(self):
super().__init__()
self.duck = DDGSolver()
# for usage in tell me more / follow up questions
self.idx = 0
self.results = []
self.image = None
# intents
@intent_handler("search_duck.intent")
def handle_search(self, message):
query = message.data["query"]
summary = self.ask_the_duck(query)
if summary:
self.speak_result()
else:
self.speak_dialog("no_answer")
@intent_handler(IntentBuilder("DuckMore").require("More").
require("DuckKnows"))
def handle_tell_more(self, message):
""" Follow up query handler, "tell me more"."""
# query = message.data["DuckKnows"]
# data, related_queries = self.duck.get_infobox(query)
# TODO maybe do something with the infobox data ?
self.speak_result()
# common query
def CQS_match_query_phrase(self, utt):
summary = self.ask_the_duck(utt)
if summary:
self.idx += 1 # spoken by common query
return (utt, CQSMatchLevel.GENERAL, summary,
{'query': utt,
'image': self.image,
'answer': summary})
def CQS_action(self, phrase, data):
""" If selected show gui """
self.display_ddg(data["answer"], data["image"])
# duck duck go api
def ask_the_duck(self, query):
# context for follow up questions
self.set_context("DuckKnows", query)
self.idx = 0
self.results = self.duck.long_answer(query, lang=self.lang)
self.image = self.duck.get_image(query)
if self.results:
return self.results[0]["summary"]
def display_ddg(self, summary=None, image=None):
if not can_use_gui(self.bus):
return
image = image or \
self.image or \
"https://github.com/JarbasSkills/skill-ddg/raw/master/ui/logo.png"
if image.startswith("/"):
image = "https://duckduckgo.com" + image
self.gui['summary'] = summary or ""
self.gui['imgLink'] = image
self.gui.show_page("DuckDelegate.qml", override_idle=60)
def speak_result(self):
if self.idx + 1 > len(self.results):
self.speak_dialog("thats all")
self.remove_context("DuckKnows")
self.idx = 0
else:
self.display_ddg(self.results[self.idx]["summary"],
self.results[self.idx]["img"])
self.speak(self.results[self.idx]["summary"])
self.idx += 1
def create_skill():
return DuckDuckGoSkill()
| 35.867347
| 82
| 0.62276
| 438
| 3,515
| 4.876712
| 0.383562
| 0.029494
| 0.042135
| 0.025281
| 0.068352
| 0.048689
| 0
| 0
| 0
| 0
| 0
| 0.005074
| 0.271124
| 3,515
| 97
| 83
| 36.237113
| 0.828649
| 0.244666
| 0
| 0.171875
| 0
| 0.015625
| 0.094584
| 0
| 0
| 0
| 0
| 0.010309
| 0
| 1
| 0.140625
| false
| 0
| 0.078125
| 0.015625
| 0.296875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
533284cf96b1c69f9f29a622772bb5c570e08d44
| 3,619
|
py
|
Python
|
rigl/experimental/jax/pruning/pruning.py
|
vishalbelsare/rigl
|
f18abc7d82ae3acc6736068408a0186c9efa575c
|
[
"Apache-2.0"
] | 276
|
2019-11-25T22:05:45.000Z
|
2022-03-30T11:55:34.000Z
|
rigl/experimental/jax/pruning/pruning.py
|
vishalbelsare/rigl
|
f18abc7d82ae3acc6736068408a0186c9efa575c
|
[
"Apache-2.0"
] | 10
|
2020-02-26T14:53:50.000Z
|
2021-09-08T16:27:28.000Z
|
rigl/experimental/jax/pruning/pruning.py
|
vishalbelsare/rigl
|
f18abc7d82ae3acc6736068408a0186c9efa575c
|
[
"Apache-2.0"
] | 54
|
2019-11-26T18:50:33.000Z
|
2022-03-29T20:08:08.000Z
|
# coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Functions for pruning FLAX masked models."""
import collections
from typing import Any, Callable, Mapping, Optional, Union
import flax
import jax.numpy as jnp
from rigl.experimental.jax.pruning import masked
def weight_magnitude(weights):
"""Creates weight magnitude-based saliencies, given a weight matrix."""
return jnp.absolute(weights)
def prune(
model,
pruning_rate,
saliency_fn = weight_magnitude,
mask = None,
compare_fn = jnp.greater):
"""Returns a mask for a model where the params in each layer are pruned using a saliency function.
Args:
model: The model to create a pruning mask for.
pruning_rate: The fraction of lowest magnitude saliency weights that are
pruned. If a float, the same rate is used for all layers, otherwise if it
is a mapping, it must contain a rate for all masked layers in the model.
saliency_fn: A function that returns a float number used to rank
the importance of individual weights in the layer.
mask: If the model has an existing mask, the mask will be applied before
pruning the model.
compare_fn: A pairwise operator to compare saliency with threshold, and
return True if the saliency indicates the value should not be masked.
Returns:
A pruned mask for the given model.
"""
if not mask:
mask = masked.simple_mask(model, jnp.ones, masked.WEIGHT_PARAM_NAMES)
if not isinstance(pruning_rate, collections.Mapping):
pruning_rate_dict = {}
for param_name, _ in masked.iterate_mask(mask):
# Get the layer name from the parameter's full name/path.
layer_name = param_name.split('/')[-2]
pruning_rate_dict[layer_name] = pruning_rate
pruning_rate = pruning_rate_dict
for param_path, param_mask in masked.iterate_mask(mask):
split_param_path = param_path.split('/')
layer_name = split_param_path[-2]
param_name = split_param_path[-1]
# If we don't have a pruning rate for the given layer, don't mask it.
if layer_name in pruning_rate and mask[layer_name][param_name] is not None:
param_value = model.params[layer_name][
masked.MaskedModule.UNMASKED][param_name]
# Here any existing mask is first applied to weight matrix.
# Note: need to check explicitly is not None for np array.
if param_mask is not None:
saliencies = saliency_fn(param_mask * param_value)
else:
saliencies = saliency_fn(param_value)
# TODO: Use partition here (partial sort) instead of sort,
# since it's O(N), not O(N log N), however JAX doesn't support it.
sorted_param = jnp.sort(jnp.abs(saliencies.flatten()))
# Figure out the weight magnitude threshold.
threshold_index = jnp.round(pruning_rate[layer_name] *
sorted_param.size).astype(jnp.int32)
threshold = sorted_param[threshold_index]
mask[layer_name][param_name] = jnp.array(
compare_fn(saliencies, threshold), dtype=jnp.int32)
return mask
| 37.697917
| 100
| 0.716773
| 540
| 3,619
| 4.690741
| 0.368519
| 0.047769
| 0.017766
| 0.021319
| 0.053691
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005957
| 0.211384
| 3,619
| 95
| 101
| 38.094737
| 0.88157
| 0.522244
| 0
| 0
| 0
| 0
| 0.001203
| 0
| 0
| 0
| 0
| 0.010526
| 0
| 1
| 0.051282
| false
| 0
| 0.128205
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
533866f2077fc08488ebf544ff7c3db315b050b5
| 283
|
py
|
Python
|
src/marion/marion/urls/__init__.py
|
OmenApps/marion
|
f501674cafbd91f0bbad7454e4dcf3527cf4445e
|
[
"MIT"
] | 7
|
2021-04-06T20:33:31.000Z
|
2021-09-30T23:29:24.000Z
|
src/marion/marion/urls/__init__.py
|
OmenApps/marion
|
f501674cafbd91f0bbad7454e4dcf3527cf4445e
|
[
"MIT"
] | 23
|
2020-09-09T15:01:50.000Z
|
2022-01-03T08:58:36.000Z
|
src/marion/marion/urls/__init__.py
|
OmenApps/marion
|
f501674cafbd91f0bbad7454e4dcf3527cf4445e
|
[
"MIT"
] | 2
|
2020-12-14T10:07:07.000Z
|
2021-06-29T00:20:43.000Z
|
"""Urls for the marion application"""
from django.urls import include, path
from rest_framework import routers
from .. import views
router = routers.DefaultRouter()
router.register(r"requests", views.DocumentRequestViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| 18.866667
| 58
| 0.749117
| 33
| 283
| 6.393939
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134276
| 283
| 14
| 59
| 20.214286
| 0.861224
| 0.109541
| 0
| 0
| 0
| 0
| 0.03252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.375
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 1
|
5342baca137d0ce393a0884db4bee3c92fc045d0
| 1,503
|
py
|
Python
|
tests/simple_gan_test.py
|
alanpeixinho/NiftyNet
|
9a17022a71985974f9e5ca992c765d55860fdd7d
|
[
"Apache-2.0"
] | null | null | null |
tests/simple_gan_test.py
|
alanpeixinho/NiftyNet
|
9a17022a71985974f9e5ca992c765d55860fdd7d
|
[
"Apache-2.0"
] | null | null | null |
tests/simple_gan_test.py
|
alanpeixinho/NiftyNet
|
9a17022a71985974f9e5ca992c765d55860fdd7d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, print_function
import unittest
import os
import tensorflow as tf
from tensorflow.keras import regularizers
from niftynet.network.simple_gan import SimpleGAN
from tests.niftynet_testcase import NiftyNetTestCase
class SimpleGANTest(NiftyNetTestCase):
def test_3d_reg_shape(self):
input_shape = (2, 32, 32, 32, 1)
noise_shape = (2, 512)
x = tf.ones(input_shape)
r = tf.ones(noise_shape)
simple_gan_instance = SimpleGAN()
out = simple_gan_instance(r, x, is_training=True)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(out)
self.assertAllClose(input_shape, out[0].shape)
self.assertAllClose((2, 1), out[1].shape)
self.assertAllClose((2, 1), out[2].shape)
def test_2d_reg_shape(self):
input_shape = (2, 64, 64, 1)
noise_shape = (2, 512)
x = tf.ones(input_shape)
r = tf.ones(noise_shape)
simple_gan_instance = SimpleGAN()
out = simple_gan_instance(r, x, is_training=True)
with self.cached_session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
out = sess.run(out)
self.assertAllClose(input_shape, out[0].shape)
self.assertAllClose((2, 1), out[1].shape)
self.assertAllClose((2, 1), out[2].shape)
if __name__ == "__main__":
tf.test.main()
| 30.673469
| 65
| 0.642715
| 199
| 1,503
| 4.613065
| 0.301508
| 0.058824
| 0.074074
| 0.104575
| 0.679739
| 0.679739
| 0.62963
| 0.62963
| 0.62963
| 0.62963
| 0
| 0.035398
| 0.24817
| 1,503
| 48
| 66
| 31.3125
| 0.776991
| 0
| 0
| 0.611111
| 0
| 0
| 0.005323
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.055556
| false
| 0
| 0.194444
| 0
| 0.277778
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.