9,900
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))

# -- Project information -----------------------------------------------------

project = 'IDFM_API'
copyright = '2021, drosocode'
author = 'drosocode'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinxcontrib.napoleon",
    "sphinx_autodoc_typehints"
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
9,901
import os

from haco.DIDrive_core.utils.others.general_helper import sort_nicely

""" FUNCTIONS FOR SAVING THE CHECKPOINTS """


def is_ready_to_save(iteration, cfg):
    """ Returns if the iteration is an iteration for saving a checkpoint """
    return iteration in set(cfg.SAVE_SCHEDULE)


def get_latest_saved_checkpoint(exp_batch, exp_alias):
    """ Returns the latest checkpoint number that was saved """
    checkpoint_dir = os.path.join('_logs', exp_batch, exp_alias, 'checkpoints')
    if os.path.exists(checkpoint_dir):
        checkpoint_files = os.listdir(checkpoint_dir)
        if checkpoint_files == []:
            return None
        sort_nicely(checkpoint_files)
        return checkpoint_files[-1]
    return None
9,902
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-07-16 18:50
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Dislike',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='DownloadRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
9,903
""" summary: generates microcode for selection description: Generates microcode for selection and dumps it to the output window. """ import ida_bytes import ida_range import ida_kernwin import ida_hexrays if ida_hexrays.init_hexrays_plugin(): sel, sea, eea = ida_kernwin.read_range_selection(None) w = ida_kernwin.warning if sel: F = ida_bytes.get_flags(sea) if ida_bytes.is_code(F): hf = ida_hexrays.hexrays_failure_t() mbr = ida_hexrays.mba_ranges_t() mbr.ranges.push_back(ida_range.range_t(sea, eea)) mba = ida_hexrays.gen_microcode(mbr, hf, None, ida_hexrays.DECOMP_WARNINGS) if mba: print("Successfully generated microcode for 0x%08x..0x%08x\n" % (sea, eea)) vp = ida_hexrays.vd_printer_t() mba._print(vp) else: w("0x%08x: %s" % (hf.errea, hf.str)) else: w("The selected range must start with an instruction") else: w("Please select a range of addresses to analyze") else: print('vds13: Hex-rays is not available.')
9,904
from . import *


class TestSubmitCommand(BrokerTestCase):
    def test_basics(self):
        msg = 'hello from ' + self.id()
        tid = int(self_check_output(['submit', 'echo', msg]).strip())

        full_human_status = self_check_output(['status'])
        self.assertSearch(r'\b%d\b[^\n]+pending' % tid, full_human_status)

        one_human_status = self_check_output(['status', str(tid)])
        self.assertSearch(r'\b%d\b[^\n]+pending' % tid, one_human_status)

        with override_stdio() as (out, _):
            self_call(['status', '--csv', 'id,status'])
        full_csv_status = list(DictReader(out))
        self.assertTrue(len(full_csv_status) >= 1)
        self.assertTrue(any(int(t['id']) == tid and t['status'] == 'pending'
                            for t in full_csv_status))

        with override_stdio() as (out, _):
            self_call(['status', '--csv', 'id,status', str(tid)])
        one_csv_status = list(DictReader(out))
        self.assertTrue(len(one_csv_status) == 1)
        t = one_csv_status[0]
        self.assertEqual(int(t['id']), tid)
        self.assertEqual(t['status'], 'pending')

        self.worker.run_to_end()
        one_status = self_check_output(['status', str(tid)])
        self.assertSearch(r'\b%d\b[^\n]+success' % tid, one_status)
        # Unfortunately we can't actually capture this message.
9,905
class Stack(object):
    def __init__(self):
        self.stack = []

    def push(self, val):
        self.stack.append(val)

    def pop(self):
        if not self.isEmpty():
            # list.remove() returns None and drops the first match;
            # list.pop() removes and returns the last element, which is
            # what a stack pop should do.
            return self.stack.pop()

    def top(self):
        if not self.isEmpty():
            return self.stack[-1]

    def isEmpty(self):
        return len(self.stack) == 0

    def size(self):
        return len(self.stack)

    def __str__(self):
        return "[" + " ".join(str(num) for num in self.stack) + "]"


if __name__ == "__main__":
    st = Stack()
    st.push(10)
    st.push(20)
    st.push(30)
    print(st)
    print(st.pop())
    print(st)
9,906
# import all relevant contents from the associated module.
from duality.misc._meta import (
    __author__,
    __copyright__,
    __credits__,
    __license__,
    __version__,
    __documentation__,
    __contact__,
    __donate__,
)

# all relevant contents.
__all__ = [
    __author__,
    __copyright__,
    __credits__,
    __license__,
    __version__,
    __documentation__,
    __contact__,
    __donate__,
]
9,907
from sailing.common.common import *
from sailing.common.notifier import NOTIFIER
import os
from sailing.conf import ENVIRONMENT_VARIABLE

APP_NAME = os.environ[ENVIRONMENT_VARIABLE]
DATA_ROOT = os.getcwd()
ABNORMAL_NOTIFIER = NOTIFIER

LOG_NAME = '%s.log' % APP_NAME
LOG_PATH = join_path(DATA_ROOT, 'logs')
LOG_LEVEL = 'TRACE'
LOG_SIZE = 2000000
LOG_ROTATION = 20
LOG_NOTIFIER = ABNORMAL_NOTIFIER
LOG_NOTIFICATION_LEVEL = 'ERROR'

MINIMAL_FILE_AGE = 1
POLLING_INTERVAL = 2
IDLE_INTERVAL = 1 * 60

RUNNING_FLAG = join_path(DATA_ROOT, '.%s_running' % APP_NAME)
CLOSING_FLAG = join_path(DATA_ROOT, '.%s_closing' % APP_NAME)

TIME_ZONE = 'RPC'
9,908
import unittest
from user_credentials import User


class TestUsers(unittest.TestCase):
    """
    This is a test class that defines the test cases for the users class behavior

    Args:
        unittest.TestCase: helps in creating test cases
    """

    def setUp(self):
        '''
        Method that runs before each test case to ensure objects are being instantiated correctly
        '''
        self.new_user = User("BMuthoni", "pass@word")

    def tearDown(self):
        User.users_list = []

    def test_init(self):
        self.assertEqual(self.new_user.user_name, "BMuthoni")
        self.assertEqual(self.new_user.password, "pass@word")

    def test_save_user(self):
        '''
        Test case to confirm that user objects are being saved to the user list
        '''
        self.new_user.save_user()  # saving a new user
        self.assertEqual(len(User.users_list), 1)

    def test_delete_user(self):
        '''
        Test to check whether a user can remove their account
        '''
        self.new_user.save_user()
        test_user = User("BMuthoni", "4676jl")
        test_user.save_user()
        self.new_user.delete_user()
        self.assertEqual(len(User.users_list), 1)

    def test_find_user(self):
        '''
        Test case to check whether a user can only login once they have created an account
        '''
        self.new_user.save_user()
        test_user = User("Muthoni", "pass@word")
        test_user.save_user()
        current_user = None
        for user in User.users_list:
            if user.user_name == test_user.user_name and user.password == test_user.password:
                current_user = user.user_name
                break  # a bare `return` here would exit the test before the assertion runs
        self.assertEqual(current_user, User.find_user(test_user.password, test_user.user_name))


if __name__ == '__main__':
    unittest.main()
9,909
# -*- coding: utf-8 -*-
import scrapy


class YahooFinanceSpider(scrapy.Spider):
    name = 'yahoo_finance'
    allowed_domains = ['finance.yahoo.com']
    start_urls = ['https://finance.yahoo.com/sector/ms_technology']

    def parse(self, response):
        company_name_list = response.xpath('//*[@id="scr-res-table"]/div[1]/table/tbody/tr/td[2]/text()').extract()
        company_price_list = response.xpath('//*[@id="scr-res-table"]/div[1]/table/tbody/tr/td[3]/span/text()').extract()
        print("")
        print("")
        print("")
        print("")
        count = len(company_name_list)
        print("Total Number :", count)
        for i in range(0, count):
            print(company_name_list[i], company_price_list[i])
        print("")
        print("")
        print("")
        print("")
9,910
'''
file torus.py
@author Gianmarco Ducci
@copyright Copyright © UCLouvain 2020

multiflap is a Python tool for finding periodic orbits and assessing their
stability via the Floquet multipliers.

Copyright <2020> <Université catholique de Louvain (UCLouvain), Belgique>

List of the contributors to the development of multiflap, Description and
complete License: see LICENSE and NOTICE files.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import numpy as np

"""
Example case adopted from:

Practical Bifurcation and Stability Analysis, page 325
Seydel R.
Eq. (7.15) - Isothermal chemical reaction dynamics
"""


class Torus:
    def __init__(self, lam=1.8):
        self.lam = lam
        self.dimension = 3  # specify the dimension of the problem

    def dynamics(self, x0, t):
        """ODE system

        This function will be passed to the numerical integrator

        Inputs:
            x0: initial values
            t: time

        Outputs:
            x_dot: velocity vector
        """
        y1, y2, y3 = x0
        dy1_dt = (self.lam - 3)*y1 - 0.25*y2 + y1*(y3 + 0.2*(1 - y3**2))
        dy2_dt = 0.25*y1 + (self.lam - 3)*y2 + y2*(y3 + 0.2*(1 - y3**2))
        dy3_dt = self.lam*y3 - (y1**2 + y2**2 + y3**2)

        vel_array = np.array([dy1_dt, dy2_dt, dy3_dt], float)
        return vel_array

    def get_stability_matrix(self, x0, t):
        """
        Stability matrix of the ODE system

        Inputs:
            x0: initial condition
        Outputs:
            A: Stability matrix evaluated at x0. (dxd) dimension
            A[i, j] = dv[i]/dx[j]
        """
        #A_matrix = np.array([[30 - 0.5*y1 - y2 - y3, -y1 + 2*0.001*y2, -y1],
        #                     [y2, y1 - 2*0.001*y2 - self.lam, 0.],
        #                     [-y3, 0., 16.5 - y1 - y3]], float)
        return  # A_matrix
9,911
import sys
import archon.facade as facade
import archon.broker as broker
import archon.model.models as m
import archon.exchange.exchanges as exc
from archon.util import *
import time
import datetime
from util import *
#from order_utils import *
import math

a = broker.Broker()
ae = [exc.KUCOIN, exc.BITTREX, exc.CRYPTOPIA, exc.HITBTC]
a.set_active_exchanges(ae)
a.set_keys_exchange_file()


def ordering():
    # NOTE: `e` (target exchange) and `qty` (total quantity) are not defined
    # anywhere in this snippet; they must be supplied by the caller's context.
    market = m.get_market("LTC", "BTC", e)
    b = a.afacade.balance_all(exchange=e)
    # buy order
    # send 50% of qty to 1 exchange and 50% to another
    # check the amount possible to buy
    qty1 = 0.5*qty
    qty2 = 0.5*qty


if __name__ == '__main__':
    ordering()
9,912
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

import numpy as np
import torch
from ignite.engine import create_supervised_trainer
from torch.utils.data import DataLoader, Dataset

from monai.data.synthetic import create_test_image_2d
from monai.losses.dice import DiceLoss
from monai.networks.nets.unet import UNet


def run_test(batch_size=64, train_steps=100, device=torch.device("cuda:0")):

    class _TestBatch(Dataset):
        def __getitem__(self, _unused_id):
            im, seg = create_test_image_2d(128, 128, noise_max=1, num_objs=4, num_seg_classes=1)
            return im[None], seg[None].astype(np.float32)

        def __len__(self):
            return train_steps

    net = UNet(
        dimensions=2,
        in_channels=1,
        num_classes=1,
        channels=(4, 8, 16, 32),
        strides=(2, 2, 2),
        num_res_units=2,
    )

    loss = DiceLoss(do_sigmoid=True)
    opt = torch.optim.Adam(net.parameters(), 1e-4)
    src = DataLoader(_TestBatch(), batch_size=batch_size)

    def loss_fn(pred, grnd):
        return loss(pred[0], grnd)

    trainer = create_supervised_trainer(net, opt, loss_fn, device, False)
    trainer.run(src, 1)

    return trainer.state.output


if __name__ == "__main__":
    result = run_test()
    print(result)
    sys.exit(0 if result < 1 else 1)
9,913
_base_ = [
    '../../_base_/default_runtime.py',
    '../../_base_/schedules/schedule_sgd_1200e.py',
    '../../_base_/det_models/drrg_r50_fpn_unet.py',
    '../../_base_/det_datasets/ctw1500.py',
    '../../_base_/det_pipelines/drrg_pipeline.py'
]

train_list = {{_base_.train_list}}
test_list = {{_base_.test_list}}

train_pipeline = {{_base_.train_pipeline}}
test_pipeline = {{_base_.test_pipeline}}

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    val_dataloader=dict(samples_per_gpu=1),
    test_dataloader=dict(samples_per_gpu=1),
    train=dict(
        type='UniformConcatDataset',
        datasets=train_list,
        pipeline=train_pipeline),
    val=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline),
    test=dict(
        type='UniformConcatDataset',
        datasets=test_list,
        pipeline=test_pipeline))

evaluation = dict(interval=20, metric='hmean-iou')
9,914
def fizzbuzz(num):
    for i in range(1, num):
        string = ""
        if i % 3 == 0:
            string = string + "fizz"
        if i % 5 == 0:
            string = string + "buzz"
        if string != "":
            print(string, end=" ")
        else:
            print(i, end=" ")


fizzbuzz(16)
9,915
import time
import logging

from motiv.actor.process import Ticker
from motiv.streams import Emitter, Subscriber


def getLogger(name):
    logger = logging.getLogger(f"motiv:{name}")
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    formatter = logging.Formatter("[%(asctime)s] %(name)-12s: %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger


publisher_stream = Emitter(address="ipc:///tmp/publisher")
subscriber_stream = Subscriber(address="ipc:///tmp/publisher")


class PublisherTicker(Ticker):
    def pre_start(self):
        self.logger = getLogger(self.name)
        self.stream_out.connect()

    def post_stop(self):
        self.stream_out.close()

    def tick(self):
        time.sleep(2)
        self.publish("mytopic", b"Hello world")
        print("\n")
        self.logger.info("\tPublishing to subscribers")


class SubscriberTicker(Ticker):
    def pre_start(self):
        self.logger = getLogger(self.name)
        self.stream_in.subscribe("mytopic")
        self.stream_in.connect()

    def post_stop(self):
        self.stream_in.close()

    def tick(self):
        try:
            channel, payload = self.receive(timeout=3000)
            self.logger.info(f"\tReceived {payload}")
        except TimeoutError:
            self.logger.exception("Timed out")


if __name__ == '__main__':
    pub = PublisherTicker("publisher[1]")
    pub.set_stream(publisher_stream)

    subscribers = []
    for i in range(5):
        subscriber = SubscriberTicker(f"subscriber[{i}]")
        subscriber.set_stream(subscriber_stream)
        subscribers.append(subscriber)

    pub.start()
    [sub.start() for sub in subscribers]
9,916
class UpdateHeuristics:
    """
    Heuristics to control thread based updates with user triggered updates.
    """
    ticks = 0
    updated = 0

    @staticmethod
    def tick():
        UpdateHeuristics.ticks += 1
        UpdateHeuristics.updated += 1

    @staticmethod
    def force_unupdate():
        UpdateHeuristics.updated += 1

    @staticmethod
    def skip_update():
        diff_heuristic = UpdateHeuristics.updated - UpdateHeuristics.ticks
        if 2 < diff_heuristic < 10:
            UpdateHeuristics.updated = 0
            return True
        elif 30 > diff_heuristic > 2:
            UpdateHeuristics.updated -= 1
            return False
        UpdateHeuristics.updated = 0
        return False
9,917
# Generated by Django 3.2.2 on 2021-08-18 02:18

import database.models
import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('database', '0006_auto_20210818_0636'),
    ]

    operations = [
        migrations.AlterField(
            model_name='note',
            name='file',
            field=models.FileField(upload_to=database.models.notes_path, validators=[django.core.validators.FileExtensionValidator(['pdf'])]),
        ),
    ]
9,918
from distutils.core import setup
from distutils.extension import Extension


def readme():
    with open('README.md') as f:
        return f.read()


setup(name='pyiArduinoI2Ckeyboard',
      version='0.0.2',
      description='iarduino.ru module for Raspberry Pi',
      long_description=readme(),
      classifiers=[
          'Programming Language :: Python :: 3',
      ],
      url='http://github.com/tremaru/pyiArduinoI2Ckeyboard',
      author='iarduino.ru',
      author_email='shop@iarduino.ru',
      license='MIT',
      ext_modules=[Extension(
          name="pyiArduinoI2Ckeyboard",
          sources=["pyiArduinoI2Ckeyboard/pyiArduinoI2Ckeyboard.cpp"])],
      # include_package_data=True,
      # zip_safe=False,
      # python_requires='>=3',
      )
9,919
#!/usr/bin/env python3

import datetime
import os
import subprocess
import sys
import time

if len(sys.argv) != 5:
    sys.exit("Usage:\n\t%s <binary> <testcase> <checker> <report-builder>" % (sys.argv[0],))

binary = sys.argv[1]
testcase = sys.argv[2]
checker = sys.argv[3]
reportbuilder = sys.argv[4]

for i in [testcase, checker, reportbuilder]:
    if not os.path.exists(i):
        sys.exit("File not found: %s" % (i,))

exec(open(testcase).read())
for i in ['input', 'output']:
    if i not in locals():
        sys.exit("Testcase %s does not provide variable '%s'" % (testcase, i))
if 'flags' not in locals():
    flags = ""
if 'timeout' not in locals():
    timeout = 100

exec(open(checker).read())
for i in ['checker']:
    if i not in locals():
        sys.exit("Checker %s does not provide variable '%s'" % (checker, i))

exec(open(reportbuilder).read())
for i in ['reportStartTest', 'reportStreams', 'reportTimeout', 'reportFailure', 'reportSuccess', 'reportEndTest']:
    if i not in locals():
        sys.exit("Report-builder %s does not provide variable '%s'" % (reportbuilder, i))

args = binary.split()
args.extend(flags.split())

reportStartTest(args, testcase)

first = 0
out = input.strip().encode()
err = ""


def run(args):
    global input
    global out
    global err
    global timeout
    try:
        if timeout < 0:
            return
        begin = time.time()
        process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (out, err) = process.communicate(out, timeout=timeout)
        timeout = timeout - time.time() + begin
    except subprocess.TimeoutExpired:
        timeout = -1
        process.kill()


for i in range(0, len(args)):
    if args[i] == "|":
        run(args[first:i])
        first = i + 1
if first < len(args):
    run(args[first:])

if timeout < 0:
    reportTimeout(out, err)
else:
    out = out.decode()
    err = err.decode()
    reportStreams(out, err)
    checker(out, err)

reportEndTest(args, testcase)
9,920
# Build a Dice type that behaves like a die, and package it as a module.
# The module name is `dice`.
import random


class Dice:
    # Initializer method.
    # This is also a method, so it must take `self` as an argument.
    # It receives the number of faces of a regular polyhedron (4, 6, 8, 12, 20).
    def __init__(self, val=6):
        # Use `raise` to signal an error.
        # Here we use Exception, the generic type that represents an error.
        # `not in` is true when the value is not in the list.
        if val not in [4, 6, 8, 12, 20]:
            raise Exception('Not exist')
        # Assign `val` to `face_num` to store the number of faces.
        # It is used inside the class, so make it an attribute: `self.face_num`.
        self.face_num = val
        # For checking:
        # print('Your choose: {}'.format(self.face_num))

    # Method that simulates rolling the die.
    # A method must always take at least one argument, and by convention it is `self`.
    def shoot(self):
        # Use the `face_num` defined above.
        # Writing just `face_num` raises an error;
        # think of it as "use the face_num stored on `self`, the instance itself".
        return random.randint(1, self.face_num)
9,921
#!/usr/bin/env python
"""tmplrun

Usage:
  tmplrun.py COMMAND TEMPLATE [ options ]
  tmplrun.py COMMAND TEMPLATE (-h | --help)

Options:
  -h --help     Show this screen.
"""
import os
import sys
import re

from docopt import docopt
from collections import namedtuple
from pathlib import Path
from jinja2 import Template

__VERSION__ = "1.0.0"


def get_tmpl(cmd, tmpl):
    script = Path(os.path.abspath(__file__))
    tmpl_file = Path.joinpath(script.parent, cmd, tmpl + ".jinja2")
    if not tmpl_file.exists():
        tmpl_file = Path.joinpath(script.parent, cmd, tmpl + f".{cmd}.jinja2")
    if not tmpl_file.exists():
        raise ValueError(f"No template found for {cmd}/{tmpl}")
    return tmpl_file.read_text()


def main():
    str_ver = f"tmplrun version {__VERSION__}"
    args = docopt(__doc__, argv=sys.argv[1:3], version=str_ver)

    template_str = get_tmpl(args["COMMAND"], args["TEMPLATE"])

    xms = re.X | re.M | re.S
    docopt_header_match = re.match(r"^\{\#\-?(.*?)\-?\#\}", template_str, xms)
    if docopt_header_match:
        docopt_header = docopt_header_match.groups()[0].strip()
        args = docopt(docopt_header, version=str_ver)

    t = Template(template_str, trim_blocks=True, lstrip_blocks=True)
    result = t.render(args=args)
    print(result)


if __name__ == '__main__':
    main()
9,922
#!/home/seun/Desktop/Mask_RCNN-master/rcnn/bin/python3

# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates Docutils-native XML from standalone '
               'reStructuredText sources. ' + default_description)

publish_cmdline(writer_name='xml', description=description)
9,923
from elasticapm.utils.module_import import import_string

_cls_register = {
    'elasticapm.instrumentation.packages.botocore.BotocoreInstrumentation',
    'elasticapm.instrumentation.packages.jinja2.Jinja2Instrumentation',
    'elasticapm.instrumentation.packages.psycopg2.Psycopg2Instrumentation',
    'elasticapm.instrumentation.packages.psycopg2.Psycopg2RegisterTypeInstrumentation',
    'elasticapm.instrumentation.packages.mysql.MySQLInstrumentation',
    'elasticapm.instrumentation.packages.pylibmc.PyLibMcInstrumentation',
    'elasticapm.instrumentation.packages.pymongo.PyMongoInstrumentation',
    'elasticapm.instrumentation.packages.pymongo.PyMongoBulkInstrumentation',
    'elasticapm.instrumentation.packages.pymongo.PyMongoCursorInstrumentation',
    'elasticapm.instrumentation.packages.python_memcached.PythonMemcachedInstrumentation',
    'elasticapm.instrumentation.packages.redis.RedisInstrumentation',
    'elasticapm.instrumentation.packages.redis.RedisPipelineInstrumentation',
    'elasticapm.instrumentation.packages.requests.RequestsInstrumentation',
    'elasticapm.instrumentation.packages.sqlite.SQLiteInstrumentation',
    'elasticapm.instrumentation.packages.urllib3.Urllib3Instrumentation',
    'elasticapm.instrumentation.packages.elasticsearch.ElasticsearchConnectionInstrumentation',
    'elasticapm.instrumentation.packages.elasticsearch.ElasticsearchInstrumentation',
    'elasticapm.instrumentation.packages.django.template.DjangoTemplateInstrumentation',
    'elasticapm.instrumentation.packages.django.template.DjangoTemplateSourceInstrumentation',
}


def register(cls):
    _cls_register.add(cls)


_instrumentation_singletons = {}


def get_instrumentation_objects():
    for cls_str in _cls_register:
        if cls_str not in _instrumentation_singletons:
            cls = import_string(cls_str)
            _instrumentation_singletons[cls_str] = cls()

        obj = _instrumentation_singletons[cls_str]
        yield obj
9,924
import sys
import re
import os

newFile = sys.argv[1]
label = sys.argv[2]
existingFile = sys.argv[3]

newScores = open(newFile).readlines()
newScores = [x.replace("\n", "") for x in newScores]

handle = open(existingFile)
existingData = handle.readlines()
handle.close()
existingData = [x.replace("\n", "") for x in existingData]

handle = open(existingFile, "w")

header = existingData[0]
header = header.replace("\n", "")
header = "%s\t%s\n" % (header, label)
handle.write(header)

existingData = existingData[1:]
for (i, line) in enumerate(existingData):
    line += "\t" + str(newScores[i]) + "\n"
    handle.write(line)
handle.close()
9,925
#!/usr/bin/env python3
# -*- coding: iso-8859-1 -*-
from collections.abc import Iterable
from os import listdir
from os.path import basename
from pickle import load, UnpicklingError
from traceback import format_exc

import numpy as np
import pandas as pd


def logg(msg, filename='error.txt'):
    """
    todo
    :param msg:
    :param filename:
    :return:
    """
    with open(filename, 'w') as fp:
        fp.write(msg)


def obj2str(data):
    """
    todo
    :param data:
    :return:
    """
    try:
        td = type(data)
        if td in [np.ndarray, pd.Series, pd.DataFrame]:
            data = np.array(data).tolist()
        elif td == dict:
            data = sorted({k: obj2str(v) for k, v in data.items()}.items())
        elif isinstance(data, Iterable):
            data = [obj2str(n) for n in data]
        return str(data)
    except:
        logg(format_exc())


def disc(filename, data=None):
    """
    todo
    :param filename:
    :param data:
    :return:
    """
    try:
        if data is None:
            with open(filename, 'rb') as fp:
                return load(fp)
        else:
            with open(filename, 'w') as fp:
                fp.write(obj2str(data))
    except UnpicklingError:
        # text file
        pass
    except:
        logg(format_exc(), filename + '.err')


for input_file in set(listdir('.')) - {basename(__file__)}:
    tmp = disc(input_file)
    if tmp is not None:
        disc(input_file + '.txt', tmp)
9,926
# Example of Incremental Variable Elimination
#
# For further information please refer to:
# "Suppressing Gender and Age in Face Templates Using Incremental Variable Elimination" by
# Philipp Terhörst, Naser Damer, Florian Kirchbuchner and Arjan Kuijper,
# International Conference on Biometrics (ICB), 2019
#

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler

from incremental_variable_elimination import IncrementalVariableElimination as IVE

# load data
# in this example we use only a small subset
X = np.load("sample_features.npy")
Y = np.load("sample_gender_labels.npy")

# feature normalization
scaler = StandardScaler()
X = scaler.fit_transform(X)

# define classifier
model_train = RandomForestClassifier(n_estimators=30)

# define params
# number of steps and number of eliminations per step have to be
# adjusted depending on the feature size
num_steps = 20
num_eliminations = 5

# init, fit and transform
ive = IVE(model_train, num_eliminations, num_steps)
ive.fit(X, Y)
X_new = ive.transform(X)
9,927
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: (C) 1998-2015 by David J. Goodger
# License: GPL 2 (see __init__.py)

"""
Concrete polytwig (orders 1 through 3) puzzles.
"""

from puzzler.puzzles.polytwigs import Polytwigs123, OneSidedPolytwigs123


class OneSidedPolytwigs123Triangle(OneSidedPolytwigs123):

    """3 solutions"""

    height = 3
    width = 3

    def coordinates(self):
        return self.coordinates_triangle(2)

    def customize_piece_data(self):
        OneSidedPolytwigs123.customize_piece_data(self)
        self.piece_data['C3'][-1]['rotations'] = (1,)
9,928
import os
import importlib

from day import Day


def get_days():
    files = [
        file for file in os.listdir(os.path.dirname(__file__))
        if file.endswith('.py') and file != '__init__.py'
    ]
    for file in files:
        mod_name = file[:-3]
        importlib.import_module(f'days.{mod_name}')
    return Day.__subclasses__()
9,929
import RPi.GPIO as GPIO


def virta_on(rele_pin):
    GPIO.output(rele_pin, GPIO.HIGH)


def virta_off(rele_pin):
    GPIO.output(rele_pin, GPIO.LOW)


def cleanup(rele_pin):
    GPIO.output(rele_pin, GPIO.LOW)
    GPIO.cleanup()


def switch(mode, PID, temp_req, temp_now, deadband_max, deadband_min, rele_pin):
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(rele_pin, GPIO.OUT)
    if mode == "active":
        if temp_req > temp_now:
            virta_on(rele_pin)
            return "on"
        else:
            virta_off(rele_pin)
            return "off"
    elif mode == "PIDctrl":
        if PID > deadband_max:
            virta_on(rele_pin)
            return "on"
        elif PID < deadband_min:
            virta_off(rele_pin)
            return "off"
9,930
def get_learnset(api, query):
    try:
        pikasp = api.get_pokemon_species(query)
    except:
        response = "Seems like that ain't a valid Pokemon name.."
        return response

    gen_name = pikasp.generation.name
    str_list = gen_name.split('-')
    str_list[0] = str_list[0].title()  # Generation
    str_list[1] = str_list[1].upper()  # IV etc

    PRE_URL = 'https://bulbapedia.bulbagarden.net/wiki/'
    pname = ''
    for n in pikasp.names:
        if n.language.name == 'en':
            pname = n.name
    MID = pname + '_%28Pokémon%29/'
    END = str_list[0] + '_' + str_list[1] + '_learnset'
    B_URL = PRE_URL + MID + END

    response = '[Gen ' + str_list[1] + ' learnset](' + B_URL + ')' + '\n'

    if pikasp.generation.name != 'generation-vii' and pikasp.generation.name != 'generation-viii':
        END = str_list[0] + '_VII_learnset'
        B_URL = PRE_URL + MID + END
        response = response + '[Gen VII learnset](' + B_URL + ')'

    return response
9,931
from typing import Any, Dict

from ....models.models import ProjectorCountdown
from ....permissions.permissions import Permissions
from ....shared.exceptions import ActionException
from ....shared.patterns import FullQualifiedId
from ...generics.delete import DeleteAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action


@register_action("projector_countdown.delete")
class ProjectorCountdownDelete(DeleteAction):
    """
    Action to delete a projector countdown.
    """

    model = ProjectorCountdown()
    schema = DefaultSchema(ProjectorCountdown()).get_delete_schema()
    permission = Permissions.Projector.CAN_MANAGE

    def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
        projector_countdown = self.datastore.get(
            FullQualifiedId(self.model.collection, instance["id"]),
            [
                "used_as_list_of_speaker_countdown_meeting_id",
                "used_as_poll_countdown_meeting_id",
            ],
        )
        meeting_id = projector_countdown.get(
            "used_as_list_of_speaker_countdown_meeting_id"
        ) or projector_countdown.get("used_as_poll_countdown_meeting_id")
        if (
            meeting_id
            and not self.is_meeting_deleted(meeting_id)
            and self.parent_action != "meeting.delete"
        ):
            raise ActionException(
                "List of speakers or poll countdown is not allowed to delete."
            )
        return instance
9,932
'''
Created by auto_sdk on 2020.09.15
'''
from dingtalk.api.base import RestApi


class OapiCspaceAuditlogListRequest(RestApi):
    def __init__(self, url=None):
        RestApi.__init__(self, url)
        self.end_date = None
        self.load_more_biz_id = None
        self.load_more_gmt_create = None
        self.page_size = None
        self.start_date = None

    def getHttpMethod(self):
        return 'POST'

    def getapiname(self):
        return 'dingtalk.oapi.cspace.auditlog.list'
9,933
from django.views.generic import TemplateView

from blog.models import Post


class HomePageView(TemplateView):
    template_name = 'pages/home.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['posts'] = Post.objects.all()[:5]
        return context


class AboutPageView(TemplateView):
    template_name = 'pages/about.html'
9,934
import re
import unittest
from time import sleep
from multiprocessing import Process
from urllib import request

from sanic import Sanic
from sanic.response import json

from sanic_prometheus import monitor, SanicPrometheusError

TEST_PORT = 54424


def launch_server():
    app = Sanic('test_mp')

    @app.route('/test')
    async def test(request):
        return json({'a': 'b'})

    monitor(app).expose_endpoint()
    app.run(port=TEST_PORT, workers=2)


class TestMultiprocessing(unittest.TestCase):
    def setUp(self):
        self._procs = []

    def tearDown(self):
        for p in self._procs:
            p.terminate()

    def test_start_server_should_not_work_with_mp(self):
        app = Sanic('test_mp')
        self.assertRaises(SanicPrometheusError, monitor(app).start_server)

    def test_metrics_are_aggregated_between_workers(self):
        p = Process(target=launch_server)
        self._procs.append(p)
        p.start()
        sleep(1)

        for _ in range(100):
            r = request.urlopen("http://localhost:{}/test".format(TEST_PORT))
            _ = r.read()

        r = request.urlopen("http://localhost:{}/metrics".format(TEST_PORT))
        nreqs = None
        for l in r.readlines():
            l = l.decode('ascii')
            m = re.match(r"^sanic_request_count_total\{.+\}\s+(\d+)\s*", l)
            if m:
                nreqs = int(m.group(1))
                break

        self.assertIsNotNone(nreqs)
        self.assertEqual(nreqs, 100)
9,935
# pylint: disable=protected-access

from sheepdog import models
from sheepdog.globals import (
    ENTITY_STATE_TRANSITIONS,
    FILE_STATE_TRANSITIONS,
    FILE_STATE_KEY,
    STATE_KEY,
    SUBMITTABLE_FILE_STATES,
    SUBMITTABLE_STATES,
)
from sheepdog.transactions.entity_base import EntityBase, EntityErrors


class SubmissionEntity(EntityBase):
    """Models an entity to be marked submitted."""

    def __init__(self, transaction, node):
        super(SubmissionEntity, self).__init__(transaction, node)
        self.action = "submit"

    def version_node(self):
        """
        Clone the current state of ``entity.node`` to the ``versioned_nodes``
        table in the database.
        """
        self.logger.info("Versioning {}.".format(self.node))
        with self.transaction.db_driver.session_scope() as session:
            session.add(models.VersionedNode.clone(self.node))

    @property
    def secondary_keys(self):
        """Return the list of unique dicts for the node."""
        return self.node._secondary_keys

    @property
    def secondary_keys_dicts(self):
        """Return the list of unique tuples for the node."""
        return self.node._secondary_keys_dicts

    @property
    def pg_secondary_keys(self):
        """Return the list of unique tuples for the node type"""
        return getattr(self.node, "__pg_secondary_keys", [])

    def submit(self):
        """
        Check whether this is a valid transition and transition the entity's
        state to `submitted` (and file_state if valid).
        """
        self.logger.info("Submitting {}.".format(self.node))
        to_state = "submitted"
        current_state = self.node._props.get(STATE_KEY, None)
        current_file_state = self.node._props.get(FILE_STATE_KEY, None)
        has_file_state = hasattr(self.node.__class__, FILE_STATE_KEY)

        # Check node.state
        if current_state not in SUBMITTABLE_STATES:
            return self.record_error(
                "Unable to submit node with state: '{}'".format(current_state),
                type=EntityErrors.INVALID_PROPERTY,
            )

        # Conditionally update node.file_state
        if has_file_state and current_file_state in SUBMITTABLE_FILE_STATES:
            self.node.props[FILE_STATE_KEY] = to_state

        self.node.props[STATE_KEY] = to_state

        # Clone to version table
        self.version_node()
9,936
import logging

from ckanext.hdx_theme.util.analytics_api import APICallAnalyticsSender

import api_tracking_helper as api_th

log = logging.getLogger(__name__)


class APITrackingMiddleware(object):
    def __init__(self, app, config):
        self.app = app

    def __call__(self, environ, start_response):
        app = self.app(environ, start_response)
        if api_th.is_api_call(environ):
            route_info = api_th.get_route_info(environ)
            log.debug('API Call: ' + str(route_info))
            api_action = api_th.get_api_action(environ)
            log.debug('Action:' + api_action)
            APICallAnalyticsSender(api_action).send_to_queue()
        return app
9,937
""" @newfield metadata: This class understands the following metadata """ from tinctest.case import TINCTestCase from tinctest.case import dataProvider from tinctest.case import skipLoading from tinctest.suite import TINCTestSuite from tinctest.loader import TINCTestLoader, TINCTestLoaderReverse, TINCTestLoaderRandomized from tinctest.runner import TINCTestRunner, TINCTextTestResult, TINCTestResultSet from tinctest.main import TINCTestProgram from tinctest.lib import get_logger from tinctest.main import TINCException import os #: The default test loader used by tinc that loads tests in the default test order default_test_loader = TINCTestLoader() #: A reverse test loader that can be used to load tests in a reverse order reverse_test_loader = TINCTestLoaderReverse() #: A randomized test loader that can be used to load tests in a randomized order randomized_test_loader = TINCTestLoaderRandomized() #: The default tinc logger that should be used by all the tinc components for logging logger = get_logger() ##### constants used across all of TINC #### # for any test for which SkipTest has to be handled separately # use this message prefix. (Example, tests skipped within # ConcurrencyTestCase) _SKIP_TEST_MSG_PREFIX = 'Skipped test within TINC: '
9,938
import sys
import datetime

sys.path.append("../")
from libraries import DB


class MemberModel:
    objDB = None
    table = ''

    def __init__(self):
        self.objDB = DB()
        self.table = 'data_members'

    def dataHaveNotWon(self):
        return self.objDB.execute_sql(
            "SELECT * FROM " + self.table + " WHERE won_on_date IS NULL ORDER BY fullname"
        ).fetchall()

    def data(self):
        return self.objDB.execute_sql(
            "SELECT * FROM " + self.table + " ORDER BY fullname"
        ).fetchall()

    def detail(self, pk):
        sql = "SELECT * FROM " + self.table + " WHERE id = ?"
        return self.objDB.execute_sql(sql, (pk,)).fetchone()

    def primary_key(self):
        query = self.objDB.execute_sql(
            "SELECT id FROM " + self.table + " ORDER BY id DESC"
        ).fetchone()
        if query is None:
            return 1
        return int(query[0]) + 1

    def save(self, fullname, description):
        # prepare save
        pk = self.primary_key()
        created_at = datetime.datetime.now()
        sql = "INSERT INTO " + self.table + " VALUES (?, NULL, ?, ?, ?)"
        # save
        self.objDB.execute_sql(sql, (created_at, pk, fullname, description))
        self.objDB.commit()
        return True

    def update_the_winner(self, primary_key, date):
        sql = "UPDATE " + self.table + " SET won_on_date = ? WHERE id = ?"
        self.objDB.execute_sql(sql, (date, primary_key))
        self.objDB.commit()
        return True

    def delete(self, primary_key):
        sql = "DELETE FROM " + self.table + " WHERE id = ?"
        self.objDB.execute_sql(sql, (primary_key,))
        self.objDB.commit()
        return True
9,939
class CAPostalCode:
    __slots__ = ['postal_code', 'city', 'place_names', 'province']

    def __init__(
            self,
            postal_code,
            city,
            place_names,
            province
    ):
        self.postal_code = postal_code
        self.city = city
        self.place_names = place_names
        self.province = province
9,940
# -*- coding: utf-8 -*-
from __future__ import absolute_import

from .utils import wait_for

from selenium.common.exceptions import NoSuchElementException


class ContentTestMixin:

    def should_see_immediately(self, text, **kwargs):
        """ Assert that DOM contains the given text. """
        return self.find_text(text, **kwargs)

    @wait_for
    def should_see(self, text, **kwargs):
        """ Wait for text to appear before testing assertion. """
        return self.should_see_immediately(text, **kwargs)

    def should_not_see(self, text, **kwargs):
        """ Wait for text to not appear before testing assertion. """
        self.assertRaises(
            NoSuchElementException, self.should_see, text, **kwargs)

    @wait_for
    def title_should_be(self, title, **kwargs):
        """ Assert that page title matches. """
        self.assertEqual(self.browser.title, title)

    def title_should_not_be(self, title, **kwargs):
        """ Assert when page title does not match. """
        self.assertRaises(
            AssertionError, self.title_should_be, title, **kwargs)

    @wait_for
    def title_should_contain(self, text, **kwargs):
        """ Assert that page title contains text. """
        self.assertIn(text, self.browser.title)

    def title_should_not_contain(self, text, **kwargs):
        """ Assert that page title does not contain text. """
        self.assertRaises(
            AssertionError, self.title_should_contain, text, **kwargs)

    @wait_for
    def source_should_contain(self, text, **kwargs):
        """ Assert that page source contains text. """
        self.assertIn(text, self.browser.page_source)

    def source_should_not_contain(self, text, **kwargs):
        """ Assert that page source does not contain text. """
        self.assertRaises(
            AssertionError, self.source_should_contain, text, **kwargs)
9,941
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0

import typing
from enum import Enum


class DiemCurrency(str, Enum):
    XUS = "XUS"


class FiatCurrency(str, Enum):
    USD = "USD"
    EUR = "EUR"
    GBP = "GBP"
    CHF = "CHF"
    CAD = "CAD"
    AUD = "AUD"
    NZD = "NZD"
    JPY = "JPY"


Currencies = typing.Union[FiatCurrency, DiemCurrency]

DEFAULT_DIEM_CURRENCY = DiemCurrency.XUS.value
9,942
from chill import *

source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-1d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-1d/tmp_files/5094.c')

procedure('kernel_jacobi_1d')

loop(0)

known(' n > 2 ')

tile(0,2,16,2)
tile(1,2,16,2)
9,943
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for rockyou dataset module."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_datasets import testing
from tensorflow_datasets.structured import rock_you


class RockYouTest(testing.DatasetBuilderTestCase):
  DATASET_CLASS = rock_you.RockYou
  SPLITS = {
      "train": 11,
  }

  DL_EXTRACT_RESULT = "rockyou.txt"


if __name__ == "__main__":
  testing.test_main()
9,944
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from io import BytesIO
from consul import Consul
import httpx

from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from opentelemetry import trace
from opentelemetry.exporter import zipkin
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor

trace.set_tracer_provider(TracerProvider())

zipkin_exporter = zipkin.ZipkinSpanExporter(
    service_name="image-service",
    # host_name="localhost",
    # port=9411,
    # endpoint='/api/v2/spans'
)

trace.get_tracer_provider().add_span_processor(
    BatchExportSpanProcessor(zipkin_exporter)
)

tracer = trace.get_tracer(__name__)

app = FastAPI()
client = httpx.AsyncClient()
consul = Consul(host='172.22.0.21', port=8500)


@app.on_event("startup")
async def startup_event():
    service = Consul.Agent.Service(consul.agent)
    service.register("FastAPI", port=3002)


@app.on_event("shutdown")
def shutdown_event():
    service = Consul.Agent.Service(consul.agent)
    service.deregister("FastAPI")


@app.get("/")
async def root():
    return {"message": "Hello World"}


@app.get("/random")
async def random_image():
    r = await client.get("https://source.unsplash.com/random")
    image = BytesIO(r.content)
    return StreamingResponse(content=image, media_type='image/jpeg')


FastAPIInstrumentor.instrument_app(app)
9,945
from core.rgb_spectrum import RGBSpectrum


def XYZToRGB(x: float, y: float, z: float) -> (float, float, float):
    return (3.240479*x - 1.537150*y - 0.498535*z,
            -0.969256*x + 1.875991*y + 0.041556*z,
            0.055648*x - 0.204043*y + 1.057311*z)


def RGBToXYZ(r: float, g: float, b: float) -> (float, float, float):
    # The Y row originally ended in `0.072169*g`, a typo: the standard
    # sRGB-to-XYZ matrix multiplies 0.072169 by the blue channel.
    return (0.412453*r + 0.357580*g + 0.180423*b,
            0.212671*r + 0.715160*g + 0.072169*b,
            0.019334*r + 0.119193*g + 0.950227*b)


def y(self) -> float:
    raise NotImplementedError


Spectrum = RGBSpectrum
#else:
#    Spectrum = SampledSpectrum
9,946
# -*- coding: utf-8 -*-
REQUEST_VAR_REGION = 'region'
REQUEST_VAR_CT = 'content_type'
REQUEST_VAR_ID = 'content_id'

SPLIT_CHUNKS_EVERY = 14

#: the format of the cache key, to be filled so that storing and deleting
#: rendered regions can take place.
#
RENDERED_CACHE_KEY = 'editregions_rendered_{content_type_id}_{content_id}_{region}'
9,947
class MeshMixin(object):
    msg_lace_search_missing = "{} is implemented in `lace-search` which is not installed and not yet publicly available"

    def compute_aabb_tree(self):
        try:
            from lace_search.aabb_tree import AabbTree
        except ImportError:
            raise NotImplementedError(self.msg_lace_search_missing.format('compute_aabb_tree'))
        return AabbTree(self.v, self.f)

    def compute_aabb_normals_tree(self):
        try:
            from lace_search.aabb_normals_tree import AabbNormalsTree
        except ImportError:
            raise NotImplementedError(self.msg_lace_search_missing.format('compute_aabb_normals_tree'))
        return AabbNormalsTree(self)

    def compute_closest_point_tree(self):
        try:
            from lace_search.closest_point_tree import ClosestPointTree
        except ImportError:
            raise NotImplementedError(self.msg_lace_search_missing.format('compute_closest_point_tree'))
        return ClosestPointTree(self)

    def closest_vertices(self, vertices):
        return self.compute_closest_point_tree().nearest(vertices)

    def closest_points(self, vertices):
        return self.closest_faces_and_points(vertices)[1]

    def closest_faces_and_points(self, vertices):
        return self.compute_aabb_tree().nearest(vertices)

    def vertices_within(self, vertex_or_vertices, radius):
        tree = self.compute_closest_point_tree()
        return tree.vertices_within(vertex_or_vertices, radius)
9,948
""" WSGI config for es_api project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'es_api.settings') application = get_wsgi_application()
9,949
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This example updates a product's notes.

To determine which products exist, run get_all_products.py.
"""

# Import appropriate modules from the client library.
from googleads import dfp

PRODUCT_ID = 'INSERT_PRODUCT_ID_HERE'


def main(client, product_id):
  # Initialize appropriate service.
  product_service = client.GetService('ProductService', version='v201711')

  # Create statement object to select a single product by an ID.
  statement = (dfp.StatementBuilder()
               .Where('id = :id')
               .WithBindVariable('id', long(product_id))
               .Limit(1))

  # Get products by statement.
  response = product_service.getProductsByStatement(statement.ToStatement())

  if 'results' in response and len(response['results']):
    # Update each local product object by changing its notes.
    updated_products = []
    for product in response['results']:
      product['notes'] = 'Product needs further review before activation.'
      updated_products.append(product)

    # Update products remotely.
    products = product_service.updateProducts(updated_products)

    # Display results.
    if products:
      for product in products:
        print ('Product with id "%s", name "%s", and '
               'notes "%s" was updated.'
               % (product['id'], product['name'], product['notes']))
    else:
      print 'No products were updated.'
  else:
    print 'No products found to update.'


if __name__ == '__main__':
  # Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, PRODUCT_ID)
9,950
from setuptools import setup


def readme():
    with open('README.md') as f:
        README = f.read()
    return README


setup(
    name="color-harmony",
    version="1.0.0",
    description="A Python package to get color harmonies of a given image.",
    long_description=readme(),
    long_description_content_type="text/markdown",
    url="https://github.com/profnote/ColorHarmony",
    author="Niti Wattanasirichaigoon",
    author_email="niti.wattanasirichaigoon@gmail.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    packages=["ColorHarmony"],
    include_package_data=True,
    install_requires=["pillow", "scikit-learn", "matplotlib"],
    entry_points={
        "console_scripts": [
            "color-harmony=ColorHarmony.colorHarmony:main",
        ]
    },
)
9,951
from __future__ import absolute_import, division, print_function

from tests.core import mock
from trakt import Trakt

from httmock import HTTMock
import pytest


def test_likes():
    with HTTMock(mock.fixtures, mock.unknown):
        with Trakt.configuration.auth('mock', 'mock'):
            likes = Trakt['users'].likes()

            assert likes is not None
            likes = list(likes)

    assert len(likes) == 3

    assert likes[0].keys == [
        ('trakt', 1519)
    ]

    assert likes[1].keys == [
        ('trakt', '1238362'),
        ('slug', 'star-wars-machete')
    ]

    assert likes[2].keys == [
        ('trakt', '840781'),
        ('slug', 'star-wars-timeline')
    ]


def test_likes_invalid_response():
    with HTTMock(mock.fixtures, mock.unknown):
        likes = Trakt['users'].likes()

        assert likes is not None
        likes = list(likes)

    assert len(likes) == 0


def test_likes_invalid_type():
    with HTTMock(mock.fixtures, mock.unknown):
        with pytest.raises(ValueError):
            likes = Trakt['users'].likes('invalid')

            assert likes is not None
            likes = list(likes)
9,952
""" Дана строка, состоящая из слов, разделенных пробелами. Определите, сколько в ней слов. Формат ввода Вводится строка. Формат вывода Выведите ответ на задачу. """ s = input() print(s.count(' ')+1)
9,953
"""RefreshMapProcessor: refresh sections in map """ # pylint: disable=C0116,R0903,E0401,W0703,W1201,redefined-outer-name,missing-function-docstring,E0401,C0114,W0511,W1203,C0200,C0103,W1203 import logging import os VERSION = "__version__ = " ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) # This is your Project Root class HelpProcessor: """HelpProcessor""" def __init__(self, supported_processor): self.supported_processor = supported_processor @property def get_version(self): """read file and return the version""" change_log_relative_path = "../../changelog.md" change_log_path = os.path.abspath( os.path.join(ROOT_DIR, change_log_relative_path) ) try: with open(change_log_path, mode="r", encoding="UTF-8") as file_change_log: txt = file_change_log.readlines() version = max(sorted(filter(lambda f: VERSION in f, txt))) logging.info(f"v. {version}") return version.strip() except FileNotFoundError: logging.warning(f"skipping {change_log_path}") return None def process(self): """Get version.""" logging.warning(self.supported_processor) return self.get_version
9,954
# -*- coding: utf-8 -*-
import sys
import unittest
import json as JSON

sys.path.append('.')

from sms_api.altiria_client import AltiriaClient
from sms_api.altiria_client import AltiriaModelTextMessage
from sms_api.exceptions import AltiriaGwException
from sms_api.exceptions import ConnectionException
from sms_api.exceptions import JsonException
from tests import Config


class TestGetCreditAuth(unittest.TestCase, Config):

    # The login parameter is missing.
    def test_error_no_login(self):
        if Config.debug:
            print('test_error_no_login')
        try:
            client = AltiriaClient(None, Config.password)
            client.getCredit()
            self.fail('JsonException should have been thrown')
        except JsonException as je:
            self.assertEqual('LOGIN_NOT_NULL', je.message)

    # The password parameter is missing.
    def test_error_no_password(self):
        if Config.debug:
            print('test_error_no_password')
        try:
            client = AltiriaClient(Config.login, None)
            client.getCredit()
            self.fail('JsonException should have been thrown')
        except JsonException as je:
            self.assertEqual('PASSWORD_NOT_NULL', je.message)


if __name__ == '__main__':
    unittest.main()
9,955
import pytest
from fastapi import status

from app.tests.conftest import AuthClient
from app.users.models import User


@pytest.mark.anyio
async def test_me(auth_client: AuthClient, user: User):
    response = await auth_client.get("/users/me/", user=user)
    assert response.status_code == status.HTTP_200_OK
    data = response.json()
    assert data["username"] == user.username
    assert data["uuid"] == str(user.uuid)
9,956
import Algorithmia

input = {
    "username": "elonmusk",
    "auth": {
        "app_key": "xxxxxxxxxx",
        "app_secret": "xxxxxxxxxx",
        "oauth_token": "xxxxxxxxxx",
        "oauth_token_secret": "xxxxxxxxxx"
    }
}

client = Algorithmia.client('YOUR_API_KEY')
algo = client.algo('twitter/RetrieveTwitterFollowers/0.1.0')
print(algo.pipe(input))
9,957
from django import template
from django.template.defaultfilters import safe

import mistune
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter  # needed by HighlightRenderer.block_code

from ..models import Page, Link

register = template.Library()


@register.simple_tag
def get_main_page(*args, **kwargs):
    return Page.objects.get(is_mainpage=True)


@register.simple_tag
def get_pages(*args, **kwargs):
    return Page.objects.all()


@register.simple_tag
def get_links(*args, **kwargs):
    return Link.objects.all()


@register.simple_tag
def has_perm(user, perm):
    return user.has_perm(perm)


@register.simple_tag
def is_app_installed(app):
    from django.apps import apps
    return apps.is_installed(app)


class HighlightRenderer(mistune.Renderer):
    def block_code(self, code, lang):
        if not lang:
            return '\n<pre><code>%s</code></pre>\n' % \
                mistune.escape(code)
        lexer = get_lexer_by_name(lang, stripall=True)
        formatter = HtmlFormatter()
        return highlight(code, lexer, formatter)


@register.filter
def hilight(value):
    renderer = HighlightRenderer()
    markdown = mistune.Markdown(renderer=renderer)
    print(markdown(value))
    return markdown(value)


@register.simple_tag
def generate_twitter_card(site, title, description, img_url='', creator='', is_large_image=False):
    if is_large_image and img_url != '':
        card = '<meta name="twitter:card" content="summary_large_image" />\n'
    else:
        card = '<meta name="twitter:card" content="summary" />\n'
    # The original passed (site, title, description) to a two-slot format
    # string; title and description are what the two meta tags expect.
    card += ('<meta name="twitter:title" content="{0}" />\n'
             '<meta name="twitter:description" content="{1}" />\n').format(title, description)
    if img_url != '':
        card += '<meta name="twitter:image" content="{}" />\n'.format(img_url)
    return safe(card)
9,958
# -*- coding: utf-8 -*-
from collections import defaultdict
import unittest

import wikipediaapi

from mock_data import wikipedia_api_request


class TestCategories(unittest.TestCase):
    def setUp(self):
        self.wiki = wikipediaapi.Wikipedia("en")
        self.wiki._query = wikipedia_api_request

    def test_categories_count(self):
        page = self.wiki.page('Test_1')
        self.assertEqual(len(page.categories), 3)

    def test_categories_titles(self):
        page = self.wiki.page('Test_1')
        self.assertEqual(
            list(sorted(map(lambda s: s.title, page.categories.values()))),
            ['Category:C' + str(i + 1) for i in range(3)]
        )

    def test_categories_nss(self):
        page = self.wiki.page('Test_1')
        self.assertEqual(
            list(sorted(map(lambda s: s.ns, page.categories.values()))),
            [14] * 3
        )
9,959
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

print(setuptools.find_packages())

setuptools.setup(name='PostOCR',
                 version='0.1',
                 description='Tkinter based GUI for detection and correction of OCR errors',
                 url='https://github.com/naiveHobo/PostOCR.git',
                 author='naiveHobo',
                 author_email='sarthakmittal2608@gmail.com',
                 license='MIT',
                 long_description=long_description,
                 long_description_content_type="text/markdown",
                 packages=setuptools.find_packages(),
                 install_requires=[
                     'Pillow',
                     'pdfplumber',
                     'PyPDF2',
                     'pytesseract',
                     'tensorflow-gpu'
                 ],
                 zip_safe=False)
9,960
#!/usr/bin/env python
# encoding: utf-8
'''
Test program for simple NeoPixelBase

This script shows the simplest way to initialize an led strip,
rendering the tricolore.

@author:     MBizm

@copyright:  2019 organization_name. All rights reserved.

@license:    Apache License 2.0

@deffield    created: December 2019
@deffield    updated: Updated
'''
from catatumbo.core.neopixel_base import NeoPixelBase
from catatumbo.core.util.cmd_functions import cmd_options
from catatumbo.core.neopixel_colors import NeoPixelColors

########################################
#                MAIN                  #
########################################
if __name__ == '__main__':
    # interpret cmd line arguments
    opts = cmd_options(NeoPixelBase.__version__,
                       NeoPixelBase.__updated__,
                       par="simple")

    np = NeoPixelBase(pixelpin=opts.port,
                      pixelnum=int(opts.len),
                      pixelorder=opts.schema,
                      color_schema=NeoPixelColors,
                      brightness=float(opts.bright))

    # preset sampleboard - Viva la France!
    sampleboard = (NeoPixelColors.W_BLUE,
                   NeoPixelColors.W_WHITE,
                   NeoPixelColors.W_RED)

    np.setPixelBySampleboard(sampleboard)
9,961
import pusher
import pusher.aiohttp
import asyncio


@asyncio.coroutine  # marks the generator as a coroutine (legacy pre-async/await style)
def main():
    client = pusher.Pusher.from_env(
        backend=pusher.aiohttp.AsyncIOBackend,
        timeout=50
    )
    print("before trigger")
    response = yield from client.trigger("hello", "world", dict(foo='bar'))
    print(response)


asyncio.get_event_loop().run_until_complete(main())
9,962
from tqdm import tqdm import torch import torch.nn.functional as F ## Model training function def train(model, device, train_loader, optimizer, train_acc, train_losses): model.train() pbar = tqdm(train_loader) correct = 0 processed = 0 for batch_idx, (data, target) in enumerate(pbar): data = data["image"].to(device) target = target.to(device) optimizer.zero_grad() y_pred = model(data) # loss = F.nll_loss(y_pred, target) loss = F.cross_entropy(y_pred, target) train_losses.append(loss.item()) # Backpropagation loss.backward() optimizer.step() pred = y_pred.argmax(dim=1, keepdim=True) correct += pred.eq(target.view_as(pred)).sum().item() processed += len(data) pbar.set_description( desc=f'Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}') train_acc.append(100*correct/processed) def test(model, device, test_loader, test_acc, test_losses): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data, target = data["image"].to(device), target.to(device) output = model(data) test_loss += F.cross_entropy(output, target, reduction='sum').item() pred = output.argmax(dim=1, keepdim=True) correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) test_losses.append(test_loss) print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format( test_loss, correct, len(test_loader.dataset), 100. * correct / len(test_loader.dataset))) test_acc.append(100. * correct / len(test_loader.dataset))
827
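A hedged driver sketch for the helpers above. The tiny model, learning rate, and epoch count are illustrative only; note both functions index each batch's inputs as a dict with an "image" key, so the loaders must be built to yield that shape (e.g. an albumentations-style dataset):

import torch
import torch.nn as nn
import torch.optim as optim

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))

    def forward(self, x):
        return self.net(x)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = TinyNet().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
train_acc, train_losses, test_acc, test_losses = [], [], [], []

# train_loader/test_loader must yield ({"image": tensor}, target) batches:
# for epoch in range(1, 21):
#     train(model, device, train_loader, optimizer, train_acc, train_losses)
#     test(model, device, test_loader, test_acc, test_losses)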
9,963
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101

# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import

import json

from .cell import Cell
from .user import User
from ..types import *
from ..util import serialize
from ..util import deserialize


class CellHistory(Cell):
    """Smartsheet CellHistory data model."""

    def __init__(self, props=None, base_obj=None):
        """Initialize the CellHistory model."""
        super(CellHistory, self).__init__(None, base_obj)
        self._base = None
        if base_obj is not None:
            self._base = base_obj

        self._modified_at = Timestamp()
        self._modified_by = TypedObject(User)

        if props:
            deserialize(self, props)

        self.__initialized = True

    @property
    def modified_at(self):
        return self._modified_at.value

    @modified_at.setter
    def modified_at(self, value):
        self._modified_at.value = value

    @property
    def modified_by(self):
        return self._modified_by.value

    @modified_by.setter
    def modified_by(self, value):
        self._modified_by.value = value

    def to_dict(self):
        return serialize(self)

    def to_json(self):
        # json was previously used here without being imported in this module
        return json.dumps(self.to_dict())

    def __str__(self):
        return self.to_json()
688
9,964
import os
import random
import time

from django.conf import settings
from django.core.files.storage import FileSystemStorage


class ImageStorage(FileSystemStorage):

    def __init__(self, location=settings.MEDIA_ROOT, base_url=settings.MEDIA_URL):
        # Initialize with the project's media settings by default.
        super(ImageStorage, self).__init__(location, base_url)

    # Override _save to give each upload a unique, timestamped name.
    def _save(self, name, content):
        # File extension
        ext = os.path.splitext(name)[1]
        # Directory part of the upload path
        d = os.path.dirname(name)
        # Build a filename from date/time plus a random suffix
        fn = time.strftime('%Y%m%d_%H%M%S')
        fn = fn + '_%d' % random.randint(0, 1000)
        # Recombine directory, new name, and extension
        name = os.path.join(d, fn + ext)
        # Delegate the actual write to the parent class
        return super(ImageStorage, self)._save(name, content)
440
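A hedged sketch of wiring the storage class into a model. The app, the import path `myapp.storage`, and the `upload_to` prefix are illustrative assumptions, not part of the original project:

from django.db import models

from myapp.storage import ImageStorage  # hypothetical import path for the class above


class Photo(models.Model):
    # Uploads land under MEDIA_ROOT/photos/ with the timestamped names
    # generated by ImageStorage._save.
    image = models.ImageField(upload_to='photos', storage=ImageStorage())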
9,965
import redis

db = redis.StrictRedis(host='127.0.0.1', port=6379, decode_responses=True)

f = open('./movieName.txt', 'w')


def clean_title(title):
    # Drop the parenthesised part and any trailing "[VHS]" marker.
    # (The original used str.strip('VHS'), which strips *characters*,
    # not the substring, so titles ending in V/H/S were mangled.)
    name = title.strip()
    name = name.split('(')[0]
    name = name.strip()
    if name.endswith('[VHS]'):
        name = name[:-len('[VHS]')]
    return name.strip()


while db.scard('movieName') > 0:
    title = db.spop('movieName')
    if 'Season' in title or 'Volume' in title or 'Vol.' in title:
        continue
    name = clean_title(title)
    if title == name:
        f.write(name + '\n')
    else:
        # Push the cleaned name back so it is re-checked on a later pass.
        db.sadd('movieName', name)

f.close()
412
9,966
import csv
import os
from multiprocessing.dummy import Pool
from urllib import request

# dataset can be found here: https://github.com/openimages/dataset
# https://storage.googleapis.com/openimages/2017_11/images_2017_11.tar.gz

IMAGES = 10000
PATH = "images/"


def download(url):
    filename = url[url.rfind("/") + 1:]
    if os.path.isfile(os.path.join(PATH, filename)):
        print("File {} already exists".format(filename))
        return

    print("Downloading {}".format(filename))
    try:
        data = request.urlopen(url).read()
        with open(os.path.join(PATH, filename), "wb") as f:
            f.write(data)
        print("Downloaded {}".format(filename))
    except Exception as e:
        print("Error {}: {}".format(filename, e))


with open("images.csv") as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    urls = []
    i = 0
    for row in reader:
        if i >= IMAGES:
            break
        url = row[2]
        if "http" not in url:
            continue
        urls.append(url)
        i += 1

    pool = Pool()
    pool.map(download, urls)
    pool.close()
    pool.join()
476
9,967
def bubble_sort(alist):
    n = len(alist)
    for j in range(0, n-1):  # j counts the n-1 bubble passes; i does the swaps within one pass
        count = 0
        for i in range(0, n-j-1):
            # Only walk up to index n-j-1: element i is compared with i+1, and range
            # excludes its end point, hence the -1. E.g. with 9 elements we traverse
            # up to the 8th position and compare it with position 8+1; the 8th
            # element has index 7, so range(0, 8).
            if alist[i] > alist[i+1]:
                alist[i], alist[i+1] = alist[i+1], alist[i]
                count += 1
        """
        count starts at 0 on every pass and records whether any swap happened
        during that pass. As soon as one whole pass completes without a swap,
        stop: everything ahead is already in order.
        For [1,2,3,9,8] the third pass finds count unchanged, so we return immediately.
        """
        if 0 == count:
            """
            break, continue and return differ in how many loop levels they end:
            break ends the current loop level; since we are in the outermost
            `for j` loop here, the return below could be replaced by break.
            return ends every loop level, i.e. terminates the function.
            continue skips the rest of the current iteration.
            """
            return
942
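A quick demonstration of the early exit described in the comments, assuming it runs in the same module as bubble_sort; the nearly-sorted input is the example from the docstring:

data = [1, 2, 3, 9, 8]
bubble_sort(data)
print(data)  # [1, 2, 3, 8, 9] -- the pass with no swaps triggers the return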
9,968
from django.db import models from django.utils import timezone from django.contrib.auth.models import User class Post(models.Model): STATUS_CHOICES = ( ('draft', 'Draft'), ('published', 'Published'), ) title = models.CharField(max_length=250) slug = models.SlugField(max_length=250, unique_for_date='publish') author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='blog_post') body = models.TextField() publish = models.DateTimeField(default=timezone.now) created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='draft') class Meta: ordering = ('-publish',) def __str__(self): return self.title
301
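A hedged shell-style sketch of using the model above; the user lookup and field values are illustrative and assume a populated auth table:

from django.contrib.auth.models import User

author = User.objects.first()
Post.objects.create(title='Hello', slug='hello', author=author,
                    body='First post.', status='published')
# Meta.ordering gives newest-first without an explicit order_by:
published = Post.objects.filter(status='published')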
9,969
#!/usr/bin/env python3
"""
AT Commander for AUCTUS based radios

Allows communication with AUCTUS A6 radios over their serial
interface through the debug interface.  Commands can either be
"AT" commands or "CPS" commands.  Both styles will work.
"""

from a6 import get_chan_info, SerialIO

__author__ = "jhart99"
__license__ = "MIT"


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Auctus A6 AT/CPS commander')
    parser.add_argument('-p', '--port', default='/dev/ttyUSB0', type=str, help='serial port')
    parser.add_argument('-b', '--baudrate', default=921600, type=int, help='baud rate')
    parser.add_argument('-v', '--verbosity', default=0, action='count',
                        help='print sent and received frames to stderr for debugging')
    parser.add_argument('-V', '--version', action='version', version='%(prog)s 0.0.1',
                        help='display version information and exit')
    parser.add_argument('channel', type=int, help='channel number')
    args = parser.parse_args()
    uart = SerialIO(args.port, args.baudrate, args.verbosity)
    get_chan_info(args.channel)
490
9,970
# Generated by Django 3.2.7 on 2021-10-29 07:22 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('projectpost', '0018_postmodel_language_junre'), ] operations = [ migrations.CreateModel( name='questionModel', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('room', models.CharField(blank=True, max_length=40, null=True)), ('question', models.TextField()), ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profileOf', to='projectpost.usermodel')), ], ), migrations.DeleteModel( name='postModel', ), ]
383
9,971
#!/usr/bin/env python
import rospy
from std_msgs.msg import UInt32MultiArray

# The original file contained an unresolved git merge conflict; the branch
# kept here is the one with valid syntax that also clears the array on each
# callback.

Id = UInt32MultiArray()  # array of the marker IDs present in the image

def markers(msg):
    Id.data = []  # clear the array on every callback
    for i in msg.data:
        Id.data.append(i)

rospy.init_node("Piece_presente")
rospy.Subscriber("/aruco_marker_publisher/markers_list", UInt32MultiArray, markers, queue_size=10)

pub = rospy.Publisher("/Pieces_presentes", UInt32MultiArray, queue_size=10)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
    pub.publish(Id)
    rate.sleep()
347
9,972
from . import models
from rest_framework import serializers

from officialWebsite.users.serializers import UserSerializer


class PodcastGuestLinkSerializer(serializers.ModelSerializer):
    # This serializer is for guest links
    class Meta:
        model = models.PodcastGuestLink
        fields = ('id', 'link', 'link_type')
        read_only_fields = ('id',)


class PodcastGuestModelSerializer(serializers.ModelSerializer):
    # This is the podcast guest serializer.
    # guest links
    links = PodcastGuestLinkSerializer(many=True, read_only=True)

    class Meta:
        model = models.PodcastGuest
        fields = ('id', 'name', 'organisation', 'linkedin', 'about', 'image', 'links')
        read_only_fields = ('id',)


class PodcastSerializer(serializers.ModelSerializer):
    # Main podcast serializer
    guest = PodcastGuestModelSerializer(read_only=True)
    members = UserSerializer(read_only=True, many=True)
    embed = serializers.SerializerMethodField('get_embed')

    def get_embed(self, obj):
        try:
            link = obj.link
            url_elements = link.split('/')
            # add 'embed' at 4th index; splitting an absolute URL leaves an
            # empty element between the scheme slashes, so a plain join
            # reassembles a valid URL
            url_elements.insert(4, 'embed')
            embed = '/'.join(str(element) for element in url_elements)
            return embed
        except Exception:
            return obj.link

    class Meta:
        model = models.Podcast
        fields = ('id', 'number', 'name', 'guest', 'members', 'recorded_on',
                  'date_created', 'published', 'image', 'link', 'embed')
        read_only_fields = ('id',)


class PodcastSeriesSerializer(serializers.ModelSerializer):
    # series serializer
    hosted = UserSerializer(read_only=True, many=True)  # rename hosted to members
    podcasts = PodcastSerializer(many=True)

    class Meta:
        model = models.PodcastSeries
        fields = ('id', 'name', 'hosted', 'note', 'logo', 'link', 'podcasts')
        read_only_fields = ('id',)
763
9,973
'''
Pattern (numbers are printed zero-padded to two digits for alignment)
Enter number of rows: 5
05 06 07 08 09
04 05 06 07
03 04 05
02 03
01
'''
print('Number Pattern: ')
number_rows = int(input('Enter number of rows: '))
for row in range(number_rows-1, -1, -1):
    k = row
    for column in range(0, row+1):
        k += 1
        if k < 10:
            print(f'0{k}', end=' ')
        else:
            print(k, end=' ')
    print()
159
9,974
import _plotly_utils.basevalidators class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator): def __init__(self, plotly_name="xanchor", parent_name="layout.image", **kwargs): super(XanchorValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "arraydraw"), values=kwargs.pop("values", ["left", "center", "right"]), **kwargs )
215
9,975
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class SyncGroupSchemaTableColumn(Model): """Properties of column in sync group table. :param quoted_name: Quoted name of sync group table column. :type quoted_name: str :param data_size: Data size of the column. :type data_size: str :param data_type: Data type of the column. :type data_type: str """ _attribute_map = { 'quoted_name': {'key': 'quotedName', 'type': 'str'}, 'data_size': {'key': 'dataSize', 'type': 'str'}, 'data_type': {'key': 'dataType', 'type': 'str'}, } def __init__(self, quoted_name=None, data_size=None, data_type=None): super(SyncGroupSchemaTableColumn, self).__init__() self.quoted_name = quoted_name self.data_size = data_size self.data_type = data_type
416
9,976
from numpy import abs, nanmax, nanmean, nanmin, nanstd, nanvar, sqrt


def rmse(model, ref):
    """ root mean square error. Ideal value is zero
    model = numerical solution of shape M by N
    ref = analytical solution or observations of shape M by N
    returns rmse
    """
    e = model - ref
    e2 = e ** 2
    mse = nanmean(e2)
    rmse = sqrt(mse)
    return rmse


def nrmse(model, ref):
    """ normalized (by data range) root mean square error. Ideal value is zero
    model = numerical solution of shape M by N
    ref = analytical solution or observations of shape M by N
    returns nrmse
    """
    _rmse = rmse(model, ref)
    # normalize by the range of the reference data, not the model output
    _range = nanmax(ref) - nanmin(ref)
    _nrmse = _rmse / _range
    return _nrmse


def r2(model, ref):
    """ coefficient of determination. Ideal value is 1
    model = numerical solution of shape M by N
    ref = analytical solution or observation of shape M by N
    returns r2
    """
    e = model - ref
    e2 = e ** 2
    mse = nanmean(e2)
    var_ref = nanvar(ref)
    r2 = 1 - mse / var_ref
    return r2


def r(model, ref):
    """ Pearson correlation coefficient. Ideal value is 1
    model = numerical solution of shape M by N
    ref = analytical solution or observation of shape M by N
    returns r
    """
    mod_res = model - nanmean(model)
    ref_res = ref - nanmean(ref)
    mod_sqr_res = mod_res ** 2
    ref_sqr_res = ref_res ** 2
    numerator = nanmean(mod_res * ref_res)
    denominator = sqrt(nanmean(mod_sqr_res) * nanmean(ref_sqr_res))
    r = numerator / denominator
    return r


def SI(model, ref):
    """ Scatter index. Ideal value is zero
    model = numerical solution of shape M by N
    ref = analytical solution or observation of shape M by N
    returns SI
    """
    e = model - ref
    std_e = nanstd(e)
    mean_abs_ref = nanmean(abs(ref))
    SI = std_e / mean_abs_ref
    return SI


def bias(model, ref):
    """ bias. Ideal value is zero
    model = numerical solution of shape M by N
    ref = analytical solution of shape M by N
    returns bias
    """
    e = model - ref
    mean_e = nanmean(e)
    return mean_e


def nb(model, ref):
    """ Normalized bias. Ideal value is zero
    model = numerical solution of shape M by N
    ref = analytical solution or observation of shape M by N
    returns nb
    """
    e = model - ref
    mean_e = nanmean(e)
    mean_abs_ref = nanmean(abs(ref))
    nb = mean_e / mean_abs_ref
    return nb
940
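An illustrative smoke test for the statistics above, assuming it runs in the same module; the synthetic sine signal and the noise level are arbitrary:

import numpy as np

x = np.linspace(0, 2 * np.pi, 200)
ref_signal = np.sin(x)
model_signal = ref_signal + np.random.normal(0.0, 0.1, x.size)

print('rmse ', rmse(model_signal, ref_signal))   # ~0.1 (the noise scale)
print('nrmse', nrmse(model_signal, ref_signal))  # rmse over the reference range of 2
print('r    ', r(model_signal, ref_signal))      # close to 1
print('bias ', bias(model_signal, ref_signal))   # close to 0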
9,977
# -*- coding: utf-8 -*-
import os

from flask import Blueprint

blog_blueprint = Blueprint('blog',
                           __name__,
                           # template_folder='../../templates/blog'
                           template_folder=os.path.join(
                               os.path.pardir, os.path.pardir, 'templates', 'blog'),
                           url_prefix='/blog')

# views is imported here purely to expose the routes defined in it, so that
# callers only need to import this file; errors is imported for the same reason.
from . import views, errors
334
9,978
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib.services.network import base class SecurityGroupsClient(base.BaseNetworkClient): def create_security_group(self, **kwargs): """Creates an OpenStack Networking security group. For a full list of available parameters, please refer to the official API reference: http://developer.openstack.org/api-ref/networking/v2/index.html#create-security-group """ uri = '/security-groups' post_data = {'security_group': kwargs} return self.create_resource(uri, post_data) def update_security_group(self, security_group_id, **kwargs): """Updates a security group. For a full list of available parameters, please refer to the official API reference: http://developer.openstack.org/api-ref/networking/v2/index.html#update-security-group """ uri = '/security-groups/%s' % security_group_id post_data = {'security_group': kwargs} return self.update_resource(uri, post_data) def show_security_group(self, security_group_id, **fields): """Shows details for a security group. For a full list of available parameters, please refer to the official API reference: http://developer.openstack.org/api-ref/networking/v2/index.html#show-security-group """ uri = '/security-groups/%s' % security_group_id return self.show_resource(uri, **fields) def delete_security_group(self, security_group_id): """Deletes an OpenStack Networking security group. For a full list of available parameters, please refer to the official API reference: http://developer.openstack.org/api-ref/networking/v2/index.html#delete-security-group """ uri = '/security-groups/%s' % security_group_id return self.delete_resource(uri) def list_security_groups(self, **filters): """Lists OpenStack Networking security groups. For a full list of available parameters, please refer to the official API reference: http://developer.openstack.org/api-ref/networking/v2/index.html#list-security-groups """ uri = '/security-groups' return self.list_resources(uri, **filters)
1,022
9,979
""" This file contains all experiments of the supplement. """ import numpy as np from X_corr import get_x_corr_params import X_corr_seita from plot_approx_errors import plot_fig4 import torch learn_X_corr = False learn_X_corr_seita = True if learn_X_corr_seita: x_max = 10 # end points for sampling n_points = 4000 # number of grid points used (=2*n_points+1) lam = 10 x_corr_filename = False normal_sigma = 1.0 x_corr_df_seita = X_corr_seita.create_table(x_max=x_max, n_points=n_points,\ sigma=normal_sigma, print_to_file=x_corr_filename, lam=lam) if learn_X_corr: np.random.seed(123) torch.manual_seed(123) T = 20000 x_max = 10 n_points = 1000 all_C = [1.0, 1.5, 1.75, 2.0] lr = 1e-2 for C in all_C: path_to_file = './X_corr/supplement_test_x_corr_params_C{}.pickle'.format(np.round(C,2)) get_x_corr_params(x_max=x_max, n_points=n_points,C=C, lr=lr, T=T, path_to_file=path_to_file) # Note, it takes a while to run above """ else: try : plot_fig4('./results/approx_error_res.pickle') except : plot_fig4() from plot_supplement_GMM_appr import done from plot_supplement_cdfs import done from plot_supplement_seita_pdf import done """
508
9,980
# import openshift as oc from .ocUtils import applyStage from os import environ, getenv, path from .utils import buildStageAssets, removeTrailSlash, isBinAvailableInPath from .notify import missingStage, missingConfigurationFile, ocBinaryMissingFromPath import yaml from .constants import OCPEASY_CONFIG_NAME def deployStage(stageId: str, proxy: str = None): if not isBinAvailableInPath("oc"): return ocBinaryMissingFromPath() buildStageAssets(stageId, proxy) PREFIX_PROJECT_ROOT = environ.get("PROJECT_DEV_PATH", ".") # TODO: define read ocpeasyConfig function projectDevPath = getenv("PROJECT_DEV_PATH", None) pathProject = "." if not projectDevPath else removeTrailSlash(projectDevPath) ocpPeasyConfigPath = f"{pathProject}/{OCPEASY_CONFIG_NAME}" if path.isfile(ocpPeasyConfigPath): with open(ocpPeasyConfigPath) as ocpPeasyConfigFile: deployConfigDict = yaml.load(ocpPeasyConfigFile, Loader=yaml.FullLoader) globalValues = dict(deployConfigDict) stage = next( (x for x in globalValues.get("stages") if x.get("stageId") == stageId), None, ) if stage: applyStage( stage.get("projectId"), f"{PREFIX_PROJECT_ROOT}/.ocpeasy/{stageId}" ) else: return missingStage() else: return missingConfigurationFile()
615
9,981
# solveODE.py # ----------------------------------------------------------------------------- # Solution of ODE for harmonic oscillator. # ----------------------------------------------------------------------------- import numpy as np import matplotlib.pyplot as plt from scipy.integrate import odeint # Import function to integrate: from simple_oscillator import F # array of time values to study t_min = 0; t_max = 10; dt = 0.1 t = np.arange(t_min, t_max+dt, dt) # two sets of initial conditions initial_conditions = [ (1.0, 0.0), (0.0, 1.0) ] plt.figure() # Create figure; then, add plots. for y0 in initial_conditions: y = odeint(F, y0, t) plt.plot(t, y[:, 0], linewidth=2) skip = 5 t_test = t[::skip] # compare at a subset of points plt.plot(t_test, np.cos(t_test), 'bo') # exact solution for y0 = (1,0) plt.plot(t_test, np.sin(t_test), 'go') # exact solution for y0 = (0,1) plt.show()
323
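The script imports F without showing it. Since the overplotted exact solutions are cos(t) and sin(t), F is presumably the unit harmonic oscillator y'' = -y written as a first-order system; a minimal sketch of what simple_oscillator likely contains:

def F(y, t):
    """Right-hand side for odeint: y = [position, velocity]."""
    return [y[1], -y[0]]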
9,982
import pytest from main.libs import generate_token from main.models.category import Category def test_create_category_successfully(client, initialize_records): test_db = initialize_records user_id = test_db["users"][0].id new_category = {"name": "item1"} response = client.post( "/categories", json=new_category, headers={"Authorization": f"Bearer {generate_token(user_id)}"}, ) assert response.status_code == 200 assert Category.query.filter_by(name=new_category["name"]).one_or_none() is not None @pytest.mark.parametrize( "category", [ {"name": ""}, {"name": "Dat诶"}, {}, ], ) def test_create_category_with_invalid_data(client, category, initialize_records): test_db = initialize_records user_id = test_db["users"][0].id response = client.post( "/categories", json=category, headers={"Authorization": f"Bearer {generate_token(user_id)}"}, ) assert response.status_code == 400 assert ( Category.query.filter_by( name=category["name"] if "name" in category else None ).one_or_none() is None ) def test_create_category_with_existed_name(client, initialize_records): test_db = initialize_records exist_category = {"name": "category1"} # this category is existed in test_db user_id = test_db["users"][0].id response = client.post( "/categories", json=exist_category, headers={"Authorization": f"Bearer {generate_token(user_id)}"}, ) assert response.status_code == 400 assert Category.query.filter_by(name=exist_category["name"]).one() is not None # error will be raised when there are 2 duplicated category names
687
9,983
from dumpster.registries.base import ModelRegistryBase
from dumpster import storage
import io
import os


class ModelRegistry(ModelRegistryBase):
    def __init__(self, name, bucket):
        """
        Save model to google cloud storage.

        Parameters
        ----------
        name : str
            Name of the model. Will be used as unique identifier.
        bucket : google.cloud.storage.bucket.Bucket
            Google storage bucket.
        """
        super().__init__(name)
        self.bucket = bucket

    def _path(self, key):
        """
        If a path with an extension is given, does nothing. If no extension
        was given, the last path component of the key is interpreted as a
        directory and `some/directory/<name>.pth` is returned.

        Parameters
        ----------
        key : str
            Location key in gcp storage

        Returns
        -------
        key : str
        """
        _, ext = os.path.splitext(key)
        if len(ext) == 0:
            return os.path.join(key, f"{self.name}.pth")
        else:
            return key

    def dump(self, key):
        """
        Save model state and source.

        Parameters
        ----------
        key : str
            Location key in gcp storage
        """
        storage.write_blob(self._path(key), self.state_blob_f, self.bucket)
        return self

    def load(self, key):
        """
        Load model state and source.

        Parameters
        ----------
        key : str
            Location key in gcp storage
        """
        f = io.BytesIO()
        storage.download_blob(self._path(key), f, self.bucket)
        f.seek(0)
        self.load_blob(f)
        return self
793
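A hedged usage sketch, assuming google-cloud-storage credentials are configured and the registry's model state has been populated elsewhere; the bucket and key names are illustrative:

from google.cloud import storage as gcs

bucket = gcs.Client().bucket('my-model-bucket')

registry = ModelRegistry('resnet', bucket)
registry.dump('experiments/run-1')  # writes experiments/run-1/resnet.pth

restored = ModelRegistry('resnet', bucket).load('experiments/run-1')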
9,984
# coding: utf8 from common import Keyboard layouts = [] STANDARD_LAYOUT = 'KEYMAP({0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11}, {12},' # 13 STANDARD_LAYOUT += '{14}, {15}, {16}, {17}, {18}, {19}, {20}, {21}, {22}, {23}, {24}, {25}, {26},' # 13 STANDARD_LAYOUT += '{27}, {28}, {29}, {30}, {31}, {32}, {33}, {34}, {35}, {36}, {37}, {38}, {39},' # 13 STANDARD_LAYOUT += '{40}, {41}, {42}, {43}, {44}, {45}, {46}, {47}, {48}, {49}, {50}, {51}, {52},' # 13 STANDARD_LAYOUT += '{53}, {54}, {55}, {56}, {57}, {58}, {59}, {60}, {61}, {13}),' # 10 layouts.append({'layout':STANDARD_LAYOUT, 'num_keys':62}) keyboard = Keyboard( name='provan', description='ProVan 60 PCB', firmware_folder='provan', layouts=layouts )
360
9,985
#!/usr/bin/env python3 # coding: utf-8 # # ThermalPrinter documentation build configuration file, created by # sphinx-quickstart on Tue Sep 27 11:49:38 2016. # # -- General configuration ------------------------------------------------ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.intersphinx', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'ThermalPrinter' copyright = '2016-2020, Tiger-222' author = 'Tiger-222' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.2.0' # The full version, including alpha/beta/rc tags. release = 'latest' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'default' # Output file base name for HTML help builder. htmlhelp_basename = 'ThermalPrinterdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'ThermalPrinter.tex', 'ThermalPrinter Documentation', 'Tiger-222', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'thermalprinter', 'ThermalPrinter Documentation', [author], 1) ] # If true, show URL addresses after external links. man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'ThermalPrinter', 'ThermalPrinter Documentation', author, 'ThermalPrinter', 'One line description of project.', 'Miscellaneous'), ] # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright # For external links to standard library intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), }
1,018
9,986
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_costa

# Create an instance of Flask
app = Flask(__name__)

# Use PyMongo to establish Mongo connection
mongo = PyMongo(app, uri="mongodb://localhost:27017/weather_app")


# Route to render index.html template using data from Mongo
@app.route("/")
def home():

    # Find one record of data from the mongo database
    # (the `destination` collection name is an assumption; the original
    # left this as a TODO)
    destination_data = mongo.db.destination.find_one()

    # Return template and data
    return render_template("index.html", vacation=destination_data)


# Route that will trigger the scrape function
@app.route("/scrape")
def scrape():

    # Run the scrape function and save the results to a variable
    # (assumes scrape_costa exposes a scrape() returning a dict; the
    # original left this as a TODO)
    costa_data = scrape_costa.scrape()

    # Update the Mongo database using update and upsert=True
    mongo.db.destination.update_one({}, {"$set": costa_data}, upsert=True)

    # Redirect back to home page
    return redirect("/")


if __name__ == "__main__":
    app.run(debug=True)
299
9,987
import numpy as np
import shapefile
from shapely.geometry import shape
import os
import time
import argparse
from hydroDL.data import gridMET
from hydroDL.utils import gis
from hydroDL import kPath

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-S', dest='iStart', type=int, default=7000)
    parser.add_argument('-E', dest='iEnd', type=int, default=7111)
    parser.add_argument('-R', dest='reMask', type=int, default=True)
    args = parser.parse_args()
    iStart = args.iStart
    iEnd = args.iEnd
    reMask = args.reMask

    ncFile = os.path.join(kPath.dirData, 'gridMET', 'etr_1979.nc')
    shpFile = os.path.join(kPath.dirData, 'USGS', 'basins', 'basinAll_prj.shp')
    saveDir = os.path.join(kPath.dirData, 'USGS', 'gridMET', 'mask')

    t, lat, lon = gridMET.readNcInfo(ncFile)
    sf = shapefile.Reader(shpFile)
    shapeLst = sf.shapes()
    recLst = sf.records()
    siteNoLst = [rec[2] for rec in recLst]

    # -R is parsed as an int, so the previous `reMask is False` identity
    # check could never match a command-line value of 0; use truthiness
    if not reMask:
        maskLst = [f[:-4] for f in os.listdir(saveDir) if f[-3:] == 'npy']
        tempShpLst = list()
        tempNoLst = list()
        for shp, siteNo in zip(shapeLst, siteNoLst):
            if siteNo not in maskLst:
                tempShpLst.append(shp)
                tempNoLst.append(siteNo)
        shapeLst = tempShpLst
        siteNoLst = tempNoLst

    if iEnd == 0:
        # do mask for every basin
        iEnd = len(siteNoLst)
        iStart = 0

    t0 = time.time()
    for k in range(iStart, iEnd):
        t1 = time.time()
        geog = shape(shapeLst[k])
        mask = gis.gridMask(lat, lon, geog)
        print('basin {} {:.2f}'.format(
            k, time.time()-t1))
        outFile = os.path.join(saveDir, siteNoLst[k])
        np.save(outFile, mask)
    print('total time {}'.format(time.time() - t0))


# """ script to run on ICME
# screen
# srun --exclusive --time 8:0:0 --pty bash
# source activate pytorch
# python /home/kuaifang/GitHUB/geolearn/app/waterQual/gridMetMask-job.py -S 1481 -E 1500
# """

# """ script to run on sherlock
# app/waterQual/data/slurmScript.py
# """
866
9,988
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * from alipay.aop.api.domain.InputInvoiceOrderRequest import InputInvoiceOrderRequest class AlipayBossFncGfsettleprodPoinvoicerelateCreateModel(object): def __init__(self): self._input_invoice_order_request = None @property def input_invoice_order_request(self): return self._input_invoice_order_request @input_invoice_order_request.setter def input_invoice_order_request(self, value): if isinstance(value, InputInvoiceOrderRequest): self._input_invoice_order_request = value else: self._input_invoice_order_request = InputInvoiceOrderRequest.from_alipay_dict(value) def to_alipay_dict(self): params = dict() if self.input_invoice_order_request: if hasattr(self.input_invoice_order_request, 'to_alipay_dict'): params['input_invoice_order_request'] = self.input_invoice_order_request.to_alipay_dict() else: params['input_invoice_order_request'] = self.input_invoice_order_request return params @staticmethod def from_alipay_dict(d): if not d: return None o = AlipayBossFncGfsettleprodPoinvoicerelateCreateModel() if 'input_invoice_order_request' in d: o.input_invoice_order_request = d['input_invoice_order_request'] return o
633
9,989
#!/usr/bin/env python
import logging

import click
import click_log

from schematic.manifest.commands import manifest as manifest_cli  # get manifest commands
from schematic.models.commands import model as model_cli  # submit manifest commands
from schematic.schemas.commands import schema as schema_cli  # schema conversion commands
from schematic import init as init_cli  # schematic initialization commands

logger = logging.getLogger()
click_log.basic_config(logger)

CONTEXT_SETTINGS = dict(help_option_names=['--help', '-h'])  # register both --help and -h as help options

# invoke_without_command=True -> run the group callback even when no
# subcommand is given, instead of forcing the help text
@click.group(context_settings=CONTEXT_SETTINGS, invoke_without_command=True)
@click_log.simple_verbosity_option(logger)
def main():
    """
    Command line interface to the `schematic` backend services.
    """
    logger.info("Starting schematic...")
    logger.debug("Sub-commands are invoked as `schematic <command>`.")

main.add_command(init_cli)  # add init commands
main.add_command(manifest_cli)  # add manifest commands
main.add_command(model_cli)  # add model commands
main.add_command(schema_cli)  # add schema commands

if __name__ == '__main__':
    main()
371
9,990
#!/usr/bin/env python3 # vim: fileencoding=utf-8 expandtab ts=4 nospell # SPDX-FileCopyrightText: 2021 Benedict Harcourt <ben.harcourt@harcourtprogramming.co.uk> # # SPDX-License-Identifier: BSD-2-Clause """Boardgames Voting""" from __future__ import annotations from typing import Any, Dict import gunicorn.app.base # type: ignore from boardgames.wsgi import BGHandler class StandAlone(gunicorn.app.base.Application): # type: ignore options: Dict[str, Any] def __init__(self, options: Dict[str, Any]): self.options = options super().__init__() def load_config(self) -> None: config = { key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None } for key, value in config.items(): self.cfg.set(key.lower(), value) def init(self, parser: Any, opts: Any, args: Any) -> None: pass def load(self) -> BGHandler: return BGHandler() if __name__ == "__main__": _options = { "bind": "127.0.1.3:8888", "workers": 1, } StandAlone(_options).run()
481
9,991
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.9.4 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import os import sys import unittest import client from client.rest import ApiException from client.models.v1beta1_subject_access_review import V1beta1SubjectAccessReview class TestV1beta1SubjectAccessReview(unittest.TestCase): """ V1beta1SubjectAccessReview unit test stubs """ def setUp(self): pass def tearDown(self): pass def testV1beta1SubjectAccessReview(self): """ Test V1beta1SubjectAccessReview """ # FIXME: construct object with mandatory attributes with example values #model = client.models.v1beta1_subject_access_review.V1beta1SubjectAccessReview() pass if __name__ == '__main__': unittest.main()
366
9,992
import os import sqlite3 def clear_webauthn_entries(): conn = sqlite3.connect('/home/damian/Documents/Educational/MEng/webauthn-firewall/webauthn-firewall.db') conn.execute('delete from webauthn_entries ;') conn.commit() conn.close() def main(): for n_users in range(1, 30 + 1): clear_webauthn_entries() os.system('python3.6 test_gogs.py {}'.format(n_users)) if __name__ == '__main__': main()
189
9,993
class Solution(object):
    def topKFrequent(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: List[int]
        """
        # from collections import Counter
        # import heapq
        # cnt = Counter(nums)
        # top_k = heapq.nlargest(k, cnt.values())
        # result = []
        # for key in cnt.keys():
        #     if cnt[key] in top_k:
        #         result.append(key)
        # return result

        # Counting-sort style approach: tally occurrences over the value
        # range, bucket numbers by frequency, then collect buckets from the
        # highest frequency down.
        max_num = max(nums)
        min_num = min(nums)
        count = [0] * (max_num - min_num + 1)
        for n in nums:
            count[n - min_num] += 1
        m = {}
        for index, c in enumerate(count):
            if c == 0:
                continue  # skip values that never occur
            if c in m:
                m[c].append(index + min_num)
            else:
                m[c] = [index + min_num]
        index = 0
        result = []
        for c in range(max(count), 0, -1):
            if index >= k:
                break
            if c in m:
                result += m[c]
                index += len(m[c])
        # The original fell off the end of the loop without returning in
        # some cases; always return the collected buckets.
        return result
709
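A quick illustrative check of the bucket-count approach above, run in the same module; output follows descending frequency:

s = Solution()
print(s.topKFrequent([1, 1, 1, 2, 2, 3], 2))  # -> [1, 2]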
9,994
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import glob
import re
from pathlib import Path

# Import fast.ai Library
from fastai import *
from fastai.vision import *

# Flask utils
from flask import Flask, redirect, url_for, request, render_template
from werkzeug.utils import secure_filename

# Define a flask app
app = Flask(__name__)

path = Path("path")
classes = ['ten','twenty','fifty', 'hundred','five_hundred','thousand','five_thousand']
data2 = ImageDataBunch.single_from_classes(path, classes, tfms=get_transforms(), size=224).normalize(imagenet_stats)
learn = create_cnn(data2, models.resnet34)
learn.load('model_9')  # fastai's load appends the .pth extension itself


def model_predict(img_path):
    """
    model_predict returns the predicted class for the image at img_path
    """
    img = open_image(img_path)
    pred_class, pred_idx, outputs = learn.predict(img)
    return pred_class


@app.route('/', methods=['GET'])
def index():
    # Main page
    return render_template('index.html')


@app.route('/predict', methods=['GET', 'POST'])
def upload():
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']

        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(
            basepath, 'uploads', secure_filename(f.filename))
        f.save(file_path)

        # Make prediction; stringify the fastai Category for the HTTP response
        preds = model_predict(file_path)
        return str(preds)
    return None


if __name__ == '__main__':
    app.run()
593
9,995
class Calculator: def power(self, n, p): if n < 0 or p < 0: raise Exception("n and p should be non-negative") return pow(n, p) myCalculator=Calculator() T=int(input()) for i in range(T): n,p = map(int, input().split()) try: ans=myCalculator.power(n,p) print(ans) except Exception as e: print(e)
174
9,996
#!/usr/bin/env python3
from .metric import ReferencedMetric
from .impl.meteor import PyMeteorWrapper

from logzero import logger


class Meteor(ReferencedMetric):
    """METEOR uses the original Java Meteor-1.5 implementation
    with a wrapper adapted from MSCOCO/E2E-metrics."""

    def support_caching(self):
        # METEOR is corpus-level, so individual examples can't be aggregated.
        # While individual scores can be computed, the overall score is different.
        return False

    def compute(self, cache, predictions, references):
        try:
            m = PyMeteorWrapper(predictions.language.alpha_2)
        except Exception as e:
            logger.warning(f"Cannot run Meteor -- Skipping: {str(e)}")
            return {"meteor": None}
        # ignore individual sentence scores
        try:
            meteor, _ = m.compute_score(predictions.untokenized, references.untokenized)
        except BrokenPipeError:
            logger.warning("METEOR FAILED TO COMPUTE.")
            meteor = -99
        return {"meteor": meteor}
410
9,997
''' You are given an array of unique integers that contain numbers in random order. Write a program to find the longest possible sequence of consecutive numbers using the numbers from given array. You need to return the output array which contains consecutive elements. Order of elements in the output is not important. Best solution takes O(n) time. If two sequences are of equal length then return the sequence starting with the number whose occurrence is earlier in the array. Input Format : Line 1 : Integer n, Size of array Line 2 : Array elements (separated by space) Constraints : 0 <= n <= 10^8 Sample Input 1 : 13 2 12 9 16 10 5 3 20 25 11 1 8 6 Sample Output 1 : 8 9 10 11 12 Sample Input 2 : 7 3 7 2 1 9 8 41 Sample Output 2 : 7 8 9 Explanation: Sequence should be of consecutive numbers. Here we have 2 sequences with same length i.e. [1, 2, 3] and [7, 8, 9], but output should be [7, 8, 9] because the starting point of [7, 8, 9] comes first in input array. Sample Input 3 : 7 15 24 23 12 19 11 16 Sample Output 3 : 15 16 ''' def longestConsecutiveSubsequence(l): #Implement Your Code Here #You have To Return the list of longestConsecutiveSubsequence ans = [] #l.sort() maxlength = 0 start = 0 d = {} for i in range(len(l)): d[l[i]] = [1, True] #print(d) for i in range(len(l)): j = l[i] #print("hello") while(d[l[i]][1] == True): if j + 1 in d: d[l[i]][0] += 1 j += 1 else: d[l[i]][1] = False if d[l[i]][0] > maxlength: maxlength = d[l[i]][0] start = l[i] #print(d) #print("max", maxlength) for i in range(maxlength): ans.append(start) start += 1 return ans n=int(input()) l=list(int(i) for i in input().strip().split(' ')) final = longestConsecutiveSubsequence(l) for num in final: print(num)
790
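The docstring above asks for O(n), but the dictionary-walk restarts a scan from every member of a run, which degrades on long runs. A common alternative, sketched below, is the standard set-based technique (not the author's code): only a number whose predecessor is absent starts a walk, and a first-occurrence index keeps the earliest-occurrence tie-break the problem requires:

def longest_consecutive_linear(l):
    # Record the first index at which each value appears, for tie-breaking.
    first_seen = {}
    for idx, num in enumerate(l):
        first_seen.setdefault(num, idx)
    present = set(l)
    best_start, best_len = None, 0
    for num in present:
        if num - 1 in present:
            continue  # not the start of a run
        length = 1
        while num + length in present:
            length += 1
        if best_start is None or length > best_len or (
                length == best_len and first_seen[num] < first_seen[best_start]):
            best_start, best_len = num, length
    return [] if best_start is None else list(range(best_start, best_start + best_len))

# longest_consecutive_linear([3, 7, 2, 1, 9, 8, 41]) -> [7, 8, 9]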
9,998
from django.db import models
from django.urls import reverse
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType

from posts.models import Post


class commentsManager(models.Manager):
    def all(self):
        qs = super(commentsManager, self).filter(parent=None)
        return qs

    def filter_by_instance(self, instance):
        content_type = ContentType.objects.get_for_model(instance.__class__)
        qs = super(commentsManager, self).filter(
            content_type=content_type, object_id=instance.id
        ).filter(parent=None)
        return qs


class comments(models.Model):
    # on_delete must be a callable such as models.CASCADE; the original
    # passed None, which breaks deletion handling
    user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE)
    #comments = models.ForeignKey(Post, on_delete=models.CASCADE)
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    parent = models.ForeignKey("self", null=True, blank=True, on_delete=models.CASCADE)

    content = models.TextField()
    timestamp = models.DateTimeField(auto_now_add=True)

    objects = commentsManager()

    class Meta:
        ordering = ['-timestamp']

    def __str__(self):
        return self.user.username

    def __unicode__(self):
        return self.user.username

    def children(self):
        return comments.objects.filter(parent=self)

    def get_absolute_url(self):
        return reverse("comments:comment_thread", kwargs={"id": self.id})

    @property
    def is_parent(self):
        if self.parent is None:
            return False
        return True
732
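A hedged shell-style sketch of the generic-relation manager above, assuming at least one Post row exists; filter_by_instance returns only top-level comments, and children() walks one level of replies:

post = Post.objects.first()
thread = comments.objects.filter_by_instance(post)
for c in thread:
    print(c.content, [reply.content for reply in c.children()])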
9,999
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-03-22 10:32 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('xwing_data', '0014_statisticset_skill'), ('matches', '0004_matchpilot_points'), ] operations = [ migrations.CreateModel( name='MatchUpgrade', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('removed', models.BooleanField(default=False)), ('upgrade', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='xwing_data.Upgrade')), ], ), migrations.AddField( model_name='matchpilot', name='stats', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='xwing_data.StatisticSet'), preserve_default=False, ), migrations.AlterField( model_name='matchpilot', name='upgrades', field=models.ManyToManyField(blank=True, to='matches.MatchUpgrade'), ), ]
575