Executing black over all jupyter notebooks (#190)

Repeatedly reverting black's changes to the Jupyter notebooks gets old. Can we
just run black over all of them instead?
This commit is contained in:
2023-10-04 20:03:47 +02:00
committed by GitHub
parent 030ad00c7d
commit 41f2c9f995
15 changed files with 658 additions and 487 deletions

View File

@ -31,8 +31,9 @@
"outputs": [],
"source": [
"import requests\n",
"url = 'https://www.tagesschau.de/api2/'\n",
"r = requests.get(url +'homepage')\n",
"\n",
"url = \"https://www.tagesschau.de/api2/\"\n",
"r = requests.get(url + \"homepage\")\n",
"r = r.json()"
]
},
@ -76,10 +77,11 @@
"source": [
"# Aggregieren der Titel und Beschreibungen\n",
"import pandas as pd\n",
"\n",
"data = {\"titles\": [], \"description\": []}\n",
"for i in range(len(r[\"news\"])): \n",
" data[\"titles\"].append(r[\"news\"][i][\"title\"])\n",
" data[\"description\"].append(r[\"news\"][i][\"content\"][0][\"value\"])\n",
"for i in range(len(r[\"news\"])):\n",
" data[\"titles\"].append(r[\"news\"][i][\"title\"])\n",
" data[\"description\"].append(r[\"news\"][i][\"content\"][0][\"value\"])\n",
"df = pd.DataFrame(data)\n",
"print(df.__len__)"
]
@ -19323,7 +19325,7 @@
}
],
"source": [
"r = requests.get(url +'news').json()\n",
"r = requests.get(url + \"news\").json()\n",
"r"
]
},
@ -19355,8 +19357,8 @@
],
"source": [
"data = {\"titles\": []}\n",
"for i in range(len(r[\"news\"])): \n",
" data[\"titles\"].append(r[\"news\"][i][\"title\"])\n",
"for i in range(len(r[\"news\"])):\n",
" data[\"titles\"].append(r[\"news\"][i][\"title\"])\n",
"# data[\"description\"].append(r[\"news\"][i][\"content\"][0][\"value\"])\n",
"df = pd.DataFrame(data)\n",
"print(df)"
@ -19391,7 +19393,7 @@
],
"source": [
"date = \"230425\"\n",
"r = requests.get(url +'newsfeed-101~_date-{date}.json').json()\n",
"r = requests.get(url + \"newsfeed-101~_date-{date}.json\").json()\n",
"r"
]
},
@ -19989,7 +19991,7 @@
"pageSize = 5\n",
"resultPage = 2\n",
"print(url)\n",
"txt = f'search/?searchText={searchtxt}&pageSize={pageSize}&resultPage={resultPage}'\n",
"txt = f\"search/?searchText={searchtxt}&pageSize={pageSize}&resultPage={resultPage}\"\n",
"r = requests.get(url + txt).json()\n",
"r"
]