Executing black over all Jupyter notebooks (#190)

Repeatedly reverting black's changes to the Jupyter notebooks gets old. Can we just run
black over all of them instead?
This commit is contained in:
2023-10-04 20:03:47 +02:00
committed by GitHub
parent 030ad00c7d
commit 41f2c9f995
15 changed files with 658 additions and 487 deletions

View File

@ -31,8 +31,9 @@
"outputs": [],
"source": [
"import requests\n",
"url = 'https://www.tagesschau.de/api2/'\n",
"r = requests.get(url +'homepage')\n",
"\n",
"url = \"https://www.tagesschau.de/api2/\"\n",
"r = requests.get(url + \"homepage\")\n",
"r = r.json()"
]
},
@ -76,10 +77,11 @@
"source": [
"# Aggregieren der Titel und Beschreibungen\n",
"import pandas as pd\n",
"\n",
"data = {\"titles\": [], \"description\": []}\n",
"for i in range(len(r[\"news\"])): \n",
" data[\"titles\"].append(r[\"news\"][i][\"title\"])\n",
" data[\"description\"].append(r[\"news\"][i][\"content\"][0][\"value\"])\n",
"for i in range(len(r[\"news\"])):\n",
" data[\"titles\"].append(r[\"news\"][i][\"title\"])\n",
" data[\"description\"].append(r[\"news\"][i][\"content\"][0][\"value\"])\n",
"df = pd.DataFrame(data)\n",
"print(df.__len__)"
]
@ -19323,7 +19325,7 @@
}
],
"source": [
"r = requests.get(url +'news').json()\n",
"r = requests.get(url + \"news\").json()\n",
"r"
]
},
@ -19355,8 +19357,8 @@
],
"source": [
"data = {\"titles\": []}\n",
"for i in range(len(r[\"news\"])): \n",
" data[\"titles\"].append(r[\"news\"][i][\"title\"])\n",
"for i in range(len(r[\"news\"])):\n",
" data[\"titles\"].append(r[\"news\"][i][\"title\"])\n",
"# data[\"description\"].append(r[\"news\"][i][\"content\"][0][\"value\"])\n",
"df = pd.DataFrame(data)\n",
"print(df)"
@ -19391,7 +19393,7 @@
],
"source": [
"date = \"230425\"\n",
"r = requests.get(url +'newsfeed-101~_date-{date}.json').json()\n",
"r = requests.get(url + \"newsfeed-101~_date-{date}.json\").json()\n",
"r"
]
},
@ -19989,7 +19991,7 @@
"pageSize = 5\n",
"resultPage = 2\n",
"print(url)\n",
"txt = f'search/?searchText={searchtxt}&pageSize={pageSize}&resultPage={resultPage}'\n",
"txt = f\"search/?searchText={searchtxt}&pageSize={pageSize}&resultPage={resultPage}\"\n",
"r = requests.get(url + txt).json()\n",
"r"
]

View File

@ -51,6 +51,7 @@
],
"source": [
"from deutschland.bundesanzeiger import Bundesanzeiger\n",
"\n",
"ba = Bundesanzeiger()\n",
"# search term\n",
"data = ba.get_reports(\"Atos IT-Dienstleistung & Beratung GmbH\")\n",
@ -73,11 +74,13 @@
],
"source": [
"# Note: There can be multiple \"Aufsichtsrat\" entries per Company, the API however does only return one because the keys are overwritten\n",
"jahresabschluss = data['Jahresabschluss zum Geschäftsjahr vom 01.01.2019 bis zum 31.12.2019']\n",
"jahresabschluss = data[\n",
" \"Jahresabschluss zum Geschäftsjahr vom 01.01.2019 bis zum 31.12.2019\"\n",
"]\n",
"\n",
"# Note: Although the report includes the entire text it lacks the formatting that would make extracting information a lot easier as the data is wrapped inside a <table> originally\n",
"with open(\"./jahresabschluss-example.txt\", \"w\") as file:\n",
" file.write(jahresabschluss['report'])\n",
" file.write(jahresabschluss[\"report\"])\n",
"print(jahresabschluss.keys())"
]
},
@ -96,6 +99,7 @@
],
"source": [
"from deutschland.handelsregister import Handelsregister\n",
"\n",
"hr = Handelsregister()\n",
"\n",
"results = hr.search(keywords=\"BLUECHILLED Verwaltungs GmbH\")\n",
@ -128,6 +132,7 @@
"source": [
"# SQLite export\n",
"import sqlite3\n",
"\n",
"con = sqlite3.connect(\"../data/openregister.db\")"
]
},
@ -176,7 +181,7 @@
],
"source": [
"schema = cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n",
"schema.fetchall()\n"
"schema.fetchall()"
]
},
{
@ -414,6 +419,7 @@
],
"source": [
"import pandas as pd\n",
"\n",
"df = pd.read_sql_query(\"SELECT * FROM company LIMIT 100\", con)\n",
"df.head()"
]