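# Scrape https://sjp.pl/sl/growy/: save a Markdown copy of the page, collect
# every link on it, then download and extract any linked .zip archive.
# Collected links are cached in ./links.json between runs.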
import requests
import bs4
import markdownify
import zipfile
import urllib.parse
import json


sjpUrl = "https://sjp.pl/sl/growy/"
mdfiedFile = "./sjp.pl_sl_growy.md"
sjpZip = "./sjp.growy.zip"

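# Fetch the source page.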
sourceserver = requests.get(sjpUrl)

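# Save a Markdown rendering of the page for reference.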
with open(mdfiedFile, "w", encoding="utf-8") as sjpPage:
    sjpPage.write(markdownify.markdownify(sourceserver.text))

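# Load previously collected links; start fresh if the cache does not exist yet.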
try:
    with open("./links.json", "r") as sjpPage:
        links = set(json.loads(sjpPage.read()))
except FileNotFoundError:
    links = set()

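# Parse the fetched HTML.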
webpage = bs4.BeautifulSoup(sourceserver.text, "html.parser")

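# Collect the absolute URL of every link on the page.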
for link in webpage.find_all("a"):
    print(link)
    try:
        href = link.get("href")
        if href:
            links.add(urllib.parse.urljoin(sjpUrl, str(href)))
    except Exception as error:
        print("ERROR:", error)

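# Persist the collected links for the next run.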
with open("./links.json", "w") as sjpPage:
    sjpPage.write(json.dumps(list(links)))

print(links)

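# Download every .zip link; each hit overwrites the same target file, so only
# the archive from the last matching link is kept.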
for link in links:
    print("ZIPTEST", link[-4:], link.endswith(".zip"))
    if link.endswith(".zip"):
        sjpDownload = requests.get(link, stream=True)
        with open(sjpZip, "wb") as fd:
            for chunk in sjpDownload.iter_content(chunk_size=128):
                fd.write(chunk)

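# Extract the downloaded archive into the current working directory.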
with zipfile.ZipFile(sjpZip) as archive:
    archive.extractall()