diff --git a/main.ipynb b/main.ipynb
index ed19025..f0a9cf1 100644
--- a/main.ipynb
+++ b/main.ipynb
@@ -8,10 +8,10 @@
     "\n",
     "The game reversi is a very good game to apply deep learning methods to.\n",
-    "Othello also known as reversi is a board game first published in 1883 by eiter Lewis Waterman or John W. Mollet in England (each one was denouncing the other as fraud).\n",
-    "It is a strickt turn based zero-sum game with a clear Markov chain and now hidden states like in card games with an unknown distribution of cards or unknown player allegiance.\n",
-    "There is like for the game go only one set of stones with two colors which is much easier to abstract than chess with its 6 unique pieces.\n",
-    "The game has a symmetrical game board wich allows to play with rotating the state around an axis to allow for a breaking of sequences or interesting ANN architectures, quadruple the data generation by simulation or interesting test cases where a symetry in turns should be observable if the AI reaches an \"objective\" policy."
+    "Othello, also known as reversi, is a board game first published in 1883 by either Lewis Waterman or John W. Mollet in England (each accusing the other of fraud).\n",
+    "It is a strict turn-based zero-sum game with a clear Markov chain and no hidden states, unlike card games with an unknown distribution of cards or unknown player allegiance.\n",
+    "The game is played with a single set of stones in two colors, which is much easier to abstract than chess with its 6 unique pieces.\n",
+    "The game board is symmetric: the state can be rotated or mirrored (see the sketch below), which can be used to break up sequences, enable interesting ANN architectures, quadruple the data generated per simulation, or build test cases in which symmetry between turns should be observable once the AI reaches an \"objective\" policy."
   ]
  },
 {
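The symmetry argument above can be made concrete. A minimal sketch of such an augmentation with NumPy, assuming boards are stacked as `(n, 8, 8)` arrays as elsewhere in this notebook; the function name `augment_with_symmetries` is illustrative and not part of this PR:

```python
import numpy as np


def augment_with_symmetries(boards: np.ndarray) -> np.ndarray:
    """Returns all 8 symmetry variants (4 rotations x optional mirror) of each board.

    Expects a stack shaped (n, 8, 8); returns a stack shaped (8 * n, 8, 8).
    """
    # The four rotations of the board plane ...
    rotations = [np.rot90(boards, k, axes=(-2, -1)) for k in range(4)]
    # ... plus each rotation mirrored along the last board axis gives the
    # full dihedral group of the square.
    mirrors = [np.flip(rotated, axis=-1) for rotated in rotations]
    return np.concatenate(rotations + mirrors, axis=0)
```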
@@ -717,9 +717,11 @@
     "\n",
     "\n",
     "@np_cache(maxsize=2000, array_argument_elements=(0, 1))\n",
-    "def _get_possible_turns_for_board(board: np.ndarray, poss_turns: np.ndarray) -> np.ndarray:\n",
+    "def _get_possible_turns_for_board(\n",
+    "    board: np.ndarray, poss_turns: np.ndarray\n",
+    ") -> np.ndarray:\n",
     "    \"\"\"Calculates where turns are possible.\n",
-    "    \n",
+    "\n",
     "    Args:\n",
     "        board: The board that should be checked for a playable action.\n",
     "        poss_turns: An array of actions that could be possible. All true fields are empty and next to an enemy stone.\n",
@@ -1681,15 +1683,17 @@
     "    )\n",
     "    ax2.scatter(turn, mean_possibility_count[turn], marker=\"x\")\n",
     "    ax2.legend()\n",
-    "    \n",
+    "\n",
     "    action_space_cumprod = np.cumprod(_mean_possibility_count[::-1], axis=0)[::-1]\n",
     "    ax4.plot(range(70), action_space_cumprod)\n",
-    "    \n",
+    "\n",
     "    ax4.scatter(turn, action_space_cumprod[turn], marker=\"x\")\n",
     "    ax4.set_yscale(\"log\", base=10)\n",
     "    ax4.set_xlabel(\"Turn\")\n",
     "    ax4.set_ylabel(\"Mean remaining total action space size\")\n",
-    "    ax4.set_title(f\"Remaining action space at {turn} = {action_space_cumprod[turn].round():.2E}\")\n",
+    "    ax4.set_title(\n",
+    "        f\"Remaining action space at {turn} = {action_space_cumprod[turn].round():.2E}\"\n",
+    "    )\n",
     "    fig.delaxes(ax3)\n",
     "    fig.tight_layout()\n",
     "    plt.show()"
@@ -1870,7 +1874,7 @@ "source": [
     "def history_changed(board_history: np.ndarray) -> np.ndarray:\n",
     "    \"\"\"Calculates if the board changed between actions.\n",
-    "    \n",
+    "\n",
     "    Args:\n",
     "        board_history: A history of game boards. Shaped (70 * n * 8 * 8)\n",
     "    \"\"\"\n",
@@ -1925,10 +1929,8 @@
     "    assert len(board_history.shape) == 4\n",
     "    assert board_history.shape[-2:] == (8, 8)\n",
     "    assert board_history.shape[0] == SIMULATE_TURNS\n",
-    "    return (\n",
-    "        pd.Series(\n",
-    "            [count_unique_boards(board_history[turn]) for turn in range(SIMULATE_TURNS)]\n",
-    "        )\n",
+    "    return pd.Series(\n",
+    "        [count_unique_boards(board_history[turn]) for turn in range(SIMULATE_TURNS)]\n",
     "    )\n",
     "\n",
     "\n",
@@ -2039,16 +2041,27 @@
     "    score[player_2_won] = -score2_final\n",
     "    return score\n",
     "\n",
+    "\n",
     "np.random.seed(2)\n",
     "_boards = simulate_game(10, (RandomPolicy(1), RandomPolicy(1)))[0]\n",
-    "np.testing.assert_array_equal(np.sum(_baords[-1], axis=(1,2)), final_boards_evaluation(_baords[-1]))\n",
+    "np.testing.assert_array_equal(\n",
+    "    np.sum(_boards[-1], axis=(1, 2)), final_boards_evaluation(_boards[-1])\n",
+    ")\n",
     "np.random.seed(2)\n",
-    "np.testing.assert_array_equal(np.array([ -6., -36., -12., -16., 38., -12., 2., -22., 2., 10.]), final_boards_evaluation(simulate_game(10, (RandomPolicy(1), RandomPolicy(1)))[0][-1]))\n",
+    "np.testing.assert_array_equal(\n",
+    "    np.array([-6.0, -36.0, -12.0, -16.0, 38.0, -12.0, 2.0, -22.0, 2.0, 10.0]),\n",
+    "    final_boards_evaluation(\n",
+    "        simulate_game(10, (RandomPolicy(1), RandomPolicy(1)))[0][-1]\n",
+    "    ),\n",
+    ")\n",
     "\n",
     "np.random.seed(2)\n",
     "boards = simulate_game(10, (RandomPolicy(1), RandomPolicy(1)))[0][-1]\n",
     "boards[:, 4, :] = 0\n",
-    "np.testing.assert_array_equal(np.array([-14., -38., -14., -22., 40., -16., -14., -28., 0., 20.]), final_boards_evaluation(boards))\n",
+    "np.testing.assert_array_equal(\n",
+    "    np.array([-14.0, -38.0, -14.0, -22.0, 40.0, -16.0, -14.0, -28.0, 0.0, 20.0]),\n",
+    "    final_boards_evaluation(boards),\n",
+    ")\n",
     "\n",
     "_boards = get_new_games(EXAMPLE_STACK_SIZE)\n",
     "%timeit final_boards_evaluation(_boards)"
@@ -2064,16 +2077,20 @@ "source": [
     "def calculate_final_evaluation_for_history(board_history: np.ndarray) -> np.ndarray:\n",
     "    \"\"\"Calculates the final scores for a stack of game histories.\n",
-    "    \n",
+    "\n",
     "    Args:\n",
     "        board_history: A stack of game histories.\n",
     "    \"\"\"\n",
     "    final_evaluation = final_boards_evaluation(board_history[-1])\n",
     "    return final_evaluation / 64\n",
     "\n",
+    "\n",
     "np.random.seed(2)\n",
     "_boards = simulate_game(10, (RandomPolicy(1), RandomPolicy(1)))[0]\n",
-    "np.testing.assert_array_equal(np.array([ -6., -36., -12., -16., 38., -12., 2., -22., 2., 10.]) / 64, calculate_final_evaluation_for_history(_boards))"
+    "np.testing.assert_array_equal(\n",
+    "    np.array([-6.0, -36.0, -12.0, -16.0, 38.0, -12.0, 2.0, -22.0, 2.0, 10.0]) / 64,\n",
+    "    calculate_final_evaluation_for_history(_boards),\n",
+    ")"
   ]
  },
 {
@@ -2140,7 +2157,7 @@
     "    Returns:\n",
     "        The combined score for both players.\n",
     "    \"\"\"\n",
-    "    assert boards.shape[-2:] == (8,8)\n",
+    "    assert boards.shape[-2:] == (8, 8)\n",
     "    return np.sum(boards, axis=(-1, -2))\n",
     "\n",
     "\n",
@@ -2159,10 +2176,10 @@
     "    (70, 10),\n",
     ")\n",
     "np.random.seed(3)\n",
-    "np.testing.assert_array_equal(evaluate_boards(simulate_game(10, (RandomPolicy(1), RandomPolicy(1)))[0][:4, :3]), np.array([[0, 0, 0],\n",
-    "    [3, 3, 3],\n",
-    "    [0, 0, 0],\n",
-    "    [5, 3, 3]]))\n",
+    "np.testing.assert_array_equal(\n",
+    "    evaluate_boards(simulate_game(10, (RandomPolicy(1), RandomPolicy(1)))[0][:4, :3]),\n",
+    "    np.array([[0, 0, 0], [3, 3, 3], [0, 0, 0], [5, 3, 3]]),\n",
+    ")\n",
     "\n",
     "_boards = get_new_games(EXAMPLE_STACK_SIZE)\n",
     "%timeit evaluate_boards(_boards)"
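The tests above expect larger margins once a row of stones is cleared, which matches the common Othello convention of awarding empty squares to the winner when the game ends. The actual `final_boards_evaluation` implementation is not part of this diff, so the following is only a sketch under that assumed convention, with boards holding `+1`/`-1` stones and `0` for empty fields:

```python
import numpy as np


def final_score_sketch(boards: np.ndarray) -> np.ndarray:
    """Final margin per board, awarding empty squares to the leading player."""
    stone_diff = np.sum(boards, axis=(1, 2))  # stone difference per game
    empties = np.sum(boards == 0, axis=(1, 2))  # unplayed squares per game
    # np.sign is 0 for a draw, so no one receives the empty squares then.
    return stone_diff + np.sign(stone_diff) * empties
```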
@@ -2273,7 +2290,8 @@
 "plt.title(\"Win distribution\")\n",
 "plt.bar(\n",
     "    [\"black\", \"draw\", \"white\"],\n",
-    "    pd.Series(calculate_who_won(_board_history)).value_counts().sort_index() / _board_history.shape[1],\n",
+    "    pd.Series(calculate_who_won(_board_history)).value_counts().sort_index()\n",
+    "    / _board_history.shape[1],\n",
     ")\n",
     "plt.show()"
   ]
  },
@@ -2298,8 +2316,8 @@
   "outputs": [],
   "source": [
     "def calculate_direct_score(board_history: np.ndarray) -> np.ndarray:\n",
-    "    \"\"\"Calcualtes the delta score for all actions.\n",
-    "    \n",
+    "    \"\"\"Calculates the delta score for all actions.\n",
+    "\n",
     "    Args:\n",
     "        board_history: A history of game boards or a stack of game boards. Shaped (70 * n * 8 * 8)\n",
     "    \"\"\"\n",
@@ -2329,7 +2347,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
-    "When ploting the direct score it can be easily seen that the later turnse are point whise more importent. A bad opening however will not alow the player to keep those points. But it is easy to see that points not made at the begining of the game can be made at the end of the game. This allows for concentration on the gameplay and some preperation at the start of the game."
+    "When plotting the direct score it can easily be seen that the later turns are point-wise more important. A bad opening, however, will not allow the player to keep those points. But points not made at the beginning of the game can still be made at the end of the game. This allows for concentration on the gameplay and some preparation at the start of the game."
  ]
 },
 {
@@ -2372,7 +2390,7 @@
     "        f\"Histogram of score changes on turn {turn} by {'white' if turn % 2 == 0 else 'black'}\"\n",
     "    )\n",
     "    score = score_history[turn]\n",
-    "    bins = max(1, int(max(score) - min(score)) )\n",
+    "    bins = max(1, int(max(score) - min(score)))\n",
     "    ax1.hist(score, density=True, bins=bins)\n",
     "    ax1.set_xlabel(\"Points made\")\n",
     "    ax1.set_ylabel(\"Score probability\")\n",
@@ -2392,47 +2410,30 @@
     "    plt.show()"
   ]
  },
"iVBORw0KGgoAAAANSUhEUgAAAiMAAAGzCAYAAAD9pBdvAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy88F64QAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAnE0lEQVR4nO3df1SUdaLH8c8AMogI/sAgWVYSf0EmJCwKqdAuK5Vb2bb3ul43kJJ7zhqbOf1kK8n1dHBTESs2b3ZJM1vZbW91T7W0uyi1GImKumk3S3cR1ADdVSDsgMFz/+g4OfFDRsGvyPt1zpyaZ77P83wfHeHNM88MNsuyLAEAABjiYXoCAACgfyNGAACAUcQIAAAwihgBAABGESMAAMAoYgQAABhFjAAAAKOIEQAAYBQxAgAAjCJGgD6upKRENptNJSUlvb4vm82mJ5980nl//fr1stlsqqys7PV9z58/X2FhYc77lZWVstlsWrlyZa/vW5KefPJJ2Wy2S7IvoL8hRgCDfve738lms+n1119v91hUVJRsNpu2bt3a7rHvfve7SkhIuBRT7HGnT5/Wk08+eUniyV2X89yAKxkxAhg0bdo0SVJpaanL8oaGBu3bt09eXl7atm2by2PV1dWqrq52rjtjxgx9+eWXmjFjxqWZ9Dnuuusuffnllxo1alS31zl9+rSWLl3q9jf8devW6cCBA27O0D1dze3xxx/Xl19+2av7B/orL9MTAPqzkSNH6pprrmkXI2VlZbIsS//2b//W7rGz98/GiIeHh3x8fC7NhL/F09NTnp6evbqPpqYmDRo0SAMGDOjV/ZyPl5eXvLz4kgn0Bs6MAIZNmzZNu3fvdvmpe9u2bbr22mt1880368MPP1RbW5vLYzabTTfccIOkjq8ZSUpK0sSJE/Xxxx/rxhtvlK+vr0JCQvT00093a07Nzc1avHixRowYocGDB+u2227TkSNH2o3r6JqRnTt3KiUlRYGBgRo4cKCuueYa3X333ZK+vs5jxIgRkqSlS5fKZrO5XIcyf/58+fn56dChQ7rllls0ePBgzZs3z/nYudeMnGv16tUaNWqUBg4cqMTERO3bt8/l8aSkJCUlJbVb79xtnm9uHV0z8tVXX2nZsmUKDw+X3W5XWFiYfvnLX6q5udllXFhYmH70ox+ptLRUcXFx8vHx0ejRo/Xyyy93eDxAf0OMAIZNmzZNZ86c0fbt253Ltm3bpoSEBCUkJKi+vt7lm+u2bds0YcIEDR8+vMvtnjx5UjfddJOioqK0atUqTZgwQY888oj++Mc/nndOCxYsUF5enmbOnKnly5drwIABmjVr1nnXq6ur08yZM1VZWalHH31Uzz77rObNm6cPP/xQkjRixAg9//zzkqQ77rhDGzdu1MaNG/XjH//YuY2vvvpKKSkpuuqqq7Ry5UrdeeedXe7z5Zdf1jPPPKN7771XWVlZ2rdvn77//e+rtrb2vPM9V3fm9m0LFizQkiVLNHnyZK1evVqJiYnKycnRT3/603ZjDx48qJ/85Cf64Q9/qFWrVmno0KGaP3++9u/f79Y8gSuSBcCo/fv3W5KsZcuWWZZlWWfOnLEGDRpkbdiwwbIsywoKCrLy8/Mty7KshoYGy9PT08rIyHCuv3XrVkuStXXrVueyxMRES5L18ssvO5c1NzdbwcHB1p133tnlfPbs2WNJshYuXOiy/D/+4z8sSVZ2drZz2UsvvWRJsv7xj39YlmVZr7/+uiXJ2rFjR6fbP378eLvtnJWWlmZJsh599NEOHxs1apTz/j/+8Q9LkjVw4EDryJEjzuXbt2+3JFmLFy92LktMTLQSExPPu82u5padnW2d+yXz7J/TggULXMY9+OCDliRry5YtzmWjRo2yJFnvv/++c1ldXZ1lt9utBx54oN2+gP6GMyOAYRERERo+fLjzWpC9e/eqqanJ+W6ZhIQE50WsZWVlam1tdV4v0hU/Pz/97Gc/c9739vZWXFyc/v73v3e53jvvvCNJuu+++1yW33///efd55AhQyRJb731ls6cOXPe8Z35+c9/3u2xs2fPVkhIiPN+XFycpkyZ4jyO3nJ2+w6Hw2X5Aw88IEl6++23XZZHRkZq+vTpzvsjRozQ+PHjz/v3AfQHxAhgmM1mU0JCgvPakG3btumqq67SmDFjJLnGyNn/didGvvOd77S7xmHo0KE6efJkl+sdPnxYHh4eCg8Pd1k+fvz48+4zMTFRd955p5YuXarAwEDdfvvteumll9pdQ9EVLy8vfec73+n2+LFjx7ZbNm7cuF7/7JOzf05n/57OCg4O1pAhQ3T48GGX5d/97nfbbaM7fx9Af0CMAJeBadOmqb6+Xh999JHzepGzEhISdPjwYR09elSlpaUaOXKkRo8efd5tdvYuF8uyemze32az2fTaa6+prKxMmZmZOnr0qO6++27FxMToiy++6NY27Ha7PDx69ktTZx9W1tra2mvb/jYTfx9AX0GMAJeBcz9vZNu2bc53ykhSTEyM7Ha7SkpKtH37dpfHesOoUaPU1tamQ4cOuSx35zM+pk6dqqeeeko7d+7Upk2btH//fm3evFlS9795d9dnn33Wbtmnn37q8s6boUOH6tSpU+3GffvshTtzO/vn9O3919bW6tSpU2599grQ3xEjwGUgNjZWPj4+2rRpk44ePepyZsRut2vy5MnKz89XU1NTt16iuRg333yzJOmZZ55xWZ6Xl3fedU+ePNnuJ/3o6GhJcr5U4+vrK0kdxsGFeOONN3T06FHn/fLycm3fvt15HJIUHh6uTz75RMePH3cu27t3b7sPlHNnbrfccouk9n8uubm5ktStdx8B+Bqf4ANcBry9vfW9731Pf/3rX2W32xUTE+PyeEJCglatWiWpe9eLXIzo6GjNnTtXv/nNb1RfX6+EhAQVFxfr4MGD5113w4YN+s1vfqM77rhD4eHhamxs1Lp16+Tv7+/85j1w4EBFRkaqsLBQ48aN07BhwzRx4kRNnDjxguY7ZswYTZs2TT//+c/V3NysvLw8DR8+XA8//LBzzN13363c3FylpKTonnvuUV1dndauXatrr71WDQ0NznHuzC0qKkppaWl64YUXdOrUKSUmJqq8vFwbNmzQ7NmzdeONN17Q8QD9EWdGgMvE2cg4+7LMuc6+NDN48GBFRUX1+lwKCgp03333qaioSA8//LDOnDnT7t0hHUlMTFRsbKw2b96s++67T08//bTGjh2rLVu26JprrnGOe/HFFxUSEqLFixdr7ty5eu211y54rqmpqfrFL36h5557Tk899ZSuvfZabdmyRVdffbVzTEREhF5++WXV19fL4XDof//3f7Vx40ZNnjy53fbcmduLL76opUuXaseOHbr//vu1ZcsWZWVlOV+SAtA9NourpwAAgEGcGQEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACM6hMfetbW1qZjx45p8ODBPf5R0gAAoHdYlqXGxkaNHDmyy9851Sdi5NixYwoNDTU9DQAAcAGqq6u7/G3cfSJGBg8eLOnrg/H39zc8GwAA0B
0NDQ0KDQ11fh/vTJ+IkbMvzfj7+xMjAAD0Mee7xIILWAEAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjvExPwLSwR982PQUYVrl8lukpAEC/dkFnRvLz8xUWFiYfHx9NmTJF5eXlnY5dv369bDaby83Hx+eCJwwAAK4sbsdIYWGhHA6HsrOzVVFRoaioKKWkpKiurq7Tdfz9/fX55587b4cPH76oSQMAgCuH2zGSm5urjIwMpaenKzIyUmvXrpWvr68KCgo6Xcdmsyk4ONh5CwoK6nIfzc3NamhocLkBAIArk1sx0tLSol27dik5OfmbDXh4KDk5WWVlZZ2u98UXX2jUqFEKDQ3V7bffrv3793e5n5ycHAUEBDhvoaGh7kwTAAD0IW7FyIkTJ9Ta2truzEZQUJBqamo6XGf8+PEqKCjQm2++qVdeeUVtbW1KSEjQkSNHOt1PVlaW6uvrnbfq6mp3pgkAAPqQXn83TXx8vOLj4533ExISFBERof/6r//SsmXLOlzHbrfLbrf39tQAAMBlwK0zI4GBgfL09FRtba3L8traWgUHB3drGwMGDND111+vgwcPurNrAABwhXIrRry9vRUTE6Pi4mLnsra2NhUXF7uc/ehKa2urPvroI1199dXuzRQAAFyR3H6ZxuFwKC0tTbGxsYqLi1NeXp6ampqUnp4uSUpNTVVISIhycnIkSb/61a80depUjRkzRqdOndKKFSt0+PBhLViwoGePBAAA9Elux8icOXN0/PhxLVmyRDU1NYqOjlZRUZHzotaqqip5eHxzwuXkyZPKyMhQTU2Nhg4dqpiYGH3wwQeKjIzsuaMAAAB9ls2yLMv0JM6noaFBAQEBqq+vl7+/f49um4+DBx8HDwC9o7vfv/lFeQAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwCgv0xMAAJgV9ujbpqcAwyqXzzK6f86MAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGHVBMZKfn6+wsDD5+PhoypQpKi8v79Z6mzdvls1m0+zZsy9ktwAA4ArkdowUFhbK4XAoOztbFRUVioqKUkpKiurq6rpcr7KyUg8++KCmT59+wZMFAABXHrdjJDc3VxkZGUpPT1dkZKTWrl0rX19fFRQUdLpOa2ur5s2bp6VLl2r06NEXNWEAAHBlcStGWlpatGvXLiUnJ3+zAQ8PJScnq6ysrNP1fvWrX+mqq67SPffc0639NDc3q6GhweUGAACuTG7FyIkTJ9Ta2qqgoCCX5UFBQaqpqelwndLSUv33f/+31q1b1+395OTkKCAgwHkLDQ11Z5oAAKAP6dV30zQ2Nuquu+7SunXrFBgY2O31srKyVF9f77xVV1f34iwBAIBJXu4MDgwMlKenp2pra12W19bWKjg4uN34Q4cOqbKyUrfeeqtzWVtb29c79vLSgQMHFB4e3m49u90uu93uztQAAEAf5daZEW9vb8XExKi4uNi5rK2tTcXFxYqPj283fsKECfroo4+0Z88e5+22227TjTfeqD179vDyCwAAcO/MiCQ5HA6lpaUpNjZWcXFxysvLU1NTk9LT0yVJqampCgkJUU5Ojnx8fDRx4kSX9YcMGSJJ7ZYDAID+ye0YmTNnjo4fP64lS5aopqZG0dHRKioqcl7UWlVVJQ8PPtgVAAB0j9sxIkmZmZnKzMzs8LGSkpIu112/fv2F7BIAAFyhOIUBAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADDqgmIkPz9fYWFh8vHx0ZQpU1ReXt7p2P/5n/9RbGyshgwZokGDBik6OlobN2684AkDAIAri9sxUlhYKIfDoezsbFVUVCgqKkopKSmqq6vrcPywYcP02GOPqaysTH/729+Unp6u9PR0vfvuuxc9eQAA0Pe5HSO5ubnKyMhQenq6IiMjtXbtWvn6+qqgoKDD8UlJSbrjjjsUERGh8PBwLVq0SJMmTVJpaelFTx4AAPR9bsVIS0uLdu3apeTk5G824OGh5ORklZWVnXd9y7JUXFysAwcOaMaMGZ2Oa25uVkNDg8sNAABcmdyKkRMnTqi1tVVBQUEuy4OCglRTU9PpevX19fLz85O3t7dmzZqlZ599Vj/84Q87HZ+Tk6OAgADnLTQ01J1pAgCAPuSSvJtm8ODB2rNnj3bs2KGnnnpKDodDJSUlnY7PyspSfX2981ZdXX0ppgkAAAzwcmdwYGCgPD09VVtb67K8trZWwcHBna7n4eGhMWPGSJKio6P1f//3f8rJyVFSUlKH4+12u+x2uztTAwAAfZRbZ0a8vb0VExOj4uJi57K2tjYVFxcrPj6+29tpa2tTc3OzO7sGAABXKLfOjEiSw+FQWlqaYmNjFRcXp7y8PDU1NSk9PV2SlJqaqpCQEOXk5Ej6+vqP2NhYhYeHq7m5We+88442btyo559/vmePBAAA9Elux8icOXN0/PhxLVmyRDU1NYqOjlZRUZHzotaqqip5eHxzwqWpqUkLFy7UkSNHNHDgQE2YMEGvvPKK5syZ03NHAQAA+iybZVmW6UmcT0NDgwICAlRfXy9/f/8e3XbYo2/36PbQ91Qun2V6CoBRfB1Eb30d7O73b343DQAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYA
QAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARl1QjOTn5yssLEw+Pj6aMmWKysvLOx27bt06TZ8+XUOHDtXQoUOVnJzc5XgAANC/uB0jhYWFcjgcys7OVkVFhaKiopSSkqK6uroOx5eUlGju3LnaunWrysrKFBoaqpkzZ+ro0aMXPXkAAND3uR0jubm5ysjIUHp6uiIjI7V27Vr5+vqqoKCgw/GbNm3SwoULFR0drQkTJujFF19UW1ubiouLL3ryAACg73MrRlpaWrRr1y4lJyd/swEPDyUnJ6usrKxb2zh9+rTOnDmjYcOGdTqmublZDQ0NLjcAAHBlcitGTpw4odbWVgUFBbksDwoKUk1NTbe28cgjj2jkyJEuQfNtOTk5CggIcN5CQ0PdmSYAAOhDLum7aZYvX67Nmzfr9ddfl4+PT6fjsrKyVF9f77xVV1dfwlkCAIBLycudwYGBgfL09FRtba3L8traWgUHB3e57sqVK7V8+XL95S9/0aRJk7oca7fbZbfb3ZkaAADoo9w6M+Lt7a2YmBiXi0/PXowaHx/f6XpPP/20li1bpqKiIsXGxl74bAEAwBXHrTMjkuRwOJSWlqbY2FjFxcUpLy9PTU1NSk9PlySlpqYqJCREOTk5kqRf//rXWrJkiV599VWFhYU5ry3x8/OTn59fDx4KAADoi9yOkTlz5uj48eNasmSJampqFB0draKiIudFrVVVVfLw+OaEy/PPP6+Wlhb95Cc/cdlOdna2nnzyyYubPQAA6PPcjhFJyszMVGZmZoePlZSUuNyvrKy8kF0AAIB+gt9NAwAAjCJGAACAUcQIAAAwihgBAABGESMAAMAoYgQAABhFjAAAAKOIEQAAYBQxAgAAjCJGAACAUcQIAAAwihgBAABGESMAAMAoYgQAABhFjAAAAKOIEQAAYBQxAgAAjCJGAACAUcQIAAAwihgBAABGESMAAMAoYgQAABhFjAAAAKOIEQAAYBQxAgAAjCJGAACAUcQIAAAwihgBAABGESMAAMAoYgQAABhFjAAAAKOIEQAAYBQxAgAAjCJGAACAUcQIAAAwihgBAABGESMAAMAoYgQAABhFjAAAAKOIEQAAYBQxAgAAjCJGAACAUcQIAAAwihgBAABGESMAAMAoYgQAABhFjAAAAKOIEQAAYBQxAgAAjCJGAACAUcQIAAAwihgBAABGESMAAMAoYgQAABhFjAAAAKOIEQAAYBQxAgAAjCJGAACAUcQIAAAw6oJiJD8/X2FhYfLx8dGUKVNUXl7e6dj9+/frzjvvVFhYmGw2m/Ly8i50rgAA4ArkdowUFhbK4XAoOztbFRUVioqKUkpKiurq6jocf/r0aY0ePVrLly9XcHDwRU8YAABcWdyOkdzcXGVkZCg9PV2RkZFau3atfH19VVBQ0OH4733ve1qxYoV++tOfym63X/SEAQDAlcWtGGlpadGuXbuUnJz8zQY8PJScnKyysrIem1Rzc7MaGhpcbgAA4MrkVoycOHFCra2tCgoKclkeFBSkmpqaHptUTk6OAgICnLfQ0NAe2zYAALi8XJbvpsnKylJ9fb3zVl1dbXpKAACgl3i5MzgwMFCenp6qra11WV5bW9ujF6fa7XauLwEAoJ9w68yIt7e3YmJiVFxc7FzW1tam4uJixcfH9/jkAADAlc+tMyOS5HA4lJaWptjYWMXFxSkvL09NTU1KT0+XJKWmpiokJEQ5OTmSvr7o9eOPP3b+/9GjR7Vnzx75+flpzJgxPXgoAACgL3I7RubMmaPjx49ryZIlqqmpUXR0tIqKipwXtVZVVcnD45sTLseOHdP111/vvL9y5UqtXLlSiYmJKikpufgjAAAAfZrbMSJJmZmZyszM7PCxbwdGWFiYLMu6kN0AAIB+4LJ8Nw0AAOg/iBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGAUMQIAAIwiRgAAgFHECAAAMIoYAQAARhEjAADAKGIEAAAYRYwAAACjiBEAAGCUl+kJAP1d2KNvm54CDKtcPsv0FACjODMCAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAoy4oRvLz8xUWFiYfHx9NmTJF5eXlXY7//e9/rwkTJsjHx0fXXXed3nnnnQuaLAAAuPK4HSOFhYVyOBzKzs5WRUWFoqKilJKSorq6ug7Hf/DBB5o7d67uuece7d69W7Nnz9bs2bO1b9++i548AADo+9yOkdzcXGVkZCg9PV2RkZFau3atfH19VVBQ0OH4NWvW6KabbtJDDz2kiIgILVu2TJMnT9Zzzz130ZMHAAB9n5c7g1taWrRr1y5lZWU5l3l4eCg5OVllZWUdrlNWViaHw+GyLCUlRW+88Uan+2lublZzc7Pzfn19vSSpoaHBnel2S1vz6R7fJvqW3nheuYPnIHgOwrTeeg6e3a5lWV2OcytGTpw4odbWVgUFBbksDwoK0ieffNLhOjU1NR2Or6mp6XQ/OTk5Wrp0abvloaGh7kwX6JaAPNMzQH/HcxCm9fZzsLGxUQEBAZ0+7laMXCpZWVkuZ1Pa2tr0r3/9S8OHD5fNZjM4sytPQ0ODQkNDVV1dLX9/f9PTQT/EcxCm8RzsPZZlqbGxUSNHjuxynFsxEhgYKE9PT9XW1rosr62tVXBwcIfrBAcHuzVekux2u+x2u8uyIUOGuDNVuMnf359/hDCK5yBM4znYO7o6I3KWWxewent7KyYmRsXFxc5lbW1tKi4uVnx8fIfrxMfHu4yXpD//+c+djgcAAP2L2y/TOBwOpaWlKTY2VnFxccrLy1NTU5PS09MlSampqQoJCVFOTo4kadGiRUpMTNSqVas0a9Ysbd68WTt37tQLL7zQs0cCAAD6JLdjZM6cOTp+/LiWLFmimpoaRUdHq6ioyHmRalVVlTw8vjnhkpCQoFdffVWPP/64fvnLX2rs2LF64403NHHixJ47Clwwu92u7Ozsdi+LAZcKz0GYxnPQPJt1vvfbAAAA9CJ+Nw0AADCK
GAEAAEYRIwAAwChiBAAAGEWM9GFJSUm6//77O308LCxMeXl5l2x/wLl4vuBysn79+vN+eOb8+fM1e/bsSzIfuLosPw4eAIBLbc2aNS6/0C0pKUnR0dE9+kMdOkaMALjkWlpa5O3tbXoagIvufGw5egcv0/RxX331lTIzMxUQEKDAwEA98cQTnf6q5tzcXF133XUaNGiQQkNDtXDhQn3xxRcuY7Zt26akpCT5+vpq6NChSklJ0cmTJzvc3ttvv62AgABt2rSpx48LfUtTU5NSU1Pl5+enq6++WqtWrXJ5PCwsTMuWLVNqaqr8/f31n//5n5KkRx55ROPGjZOvr69Gjx6tJ554QmfOnJEk1dfXy9PTUzt37pT09a+eGDZsmKZOnerc7iuvvMJv8+7H3nrrLQ0ZMkStra2SpD179shms+nRRx91jlmwYIF+9rOfOe+/++67ioiIkJ+fn2666SZ9/vnnzsfOfZlm/vz5eu+997RmzRrZbDbZbDZVVlZKkvbt26ebb75Zfn5+CgoK0l133aUTJ070/gFfwYiRPm7Dhg3y8vJSeXm51qxZo9zcXL344osdjvXw8NAzzzyj/fv3a8OGDdqyZYsefvhh5+N79uzRD37wA0VGRqqsrEylpaW69dZbnf/Qz/Xqq69q7ty52rRpk+bNm9drx4e+4aGHHtJ7772nN998U3/6059UUlKiiooKlzErV65UVFSUdu/erSeeeEKSNHjwYK1fv14ff/yx1qxZo3Xr1mn16tWSvv4pNTo6WiUlJZKkjz76SDabTbt373ZG9HvvvafExMRLd6C4rEyfPl2NjY3avXu3pK+fD4GBgc7nzNllSUlJkqTTp09r5cqV2rhxo95//31VVVXpwQcf7HDba9asUXx8vDIyMvT555/r888/V2hoqE6dOqXvf//7uv7667Vz504VFRWptrZW//7v/97bh3tls9BnJSYmWhEREVZbW5tz2SOPPGJFRERYlmVZo0aNslavXt3p+r///e+t4cOHO+/PnTvXuuGGG7rc36JFi6znnnvOCggIsEpKSi7+INDnNTY2Wt7e3tbvfvc757J//vOf1sCBA61FixZZlvX1c3H27Nnn3daKFSusmJgY532Hw2HNmjXLsizLysvLs+bMmWNFRUVZf/zjHy3LsqwxY8ZYL7zwQg8eDfqayZMnWytWrLAsy7Jmz55tPfXUU5a3t7fV2NhoHTlyxJJkffrpp9ZLL71kSbIOHjzoXDc/P98KCgpy3k9LS7Nuv/125/2zX/POtWzZMmvmzJkuy6qrqy1J1oEDB3r+APsJzoz0cVOnTpXNZnPej4+P12effdbh2Yy//OUv+sEPfqCQkBANHjxYd911l/75z3/q9OnTkr45M9KV1157TYsXL9af//xnfiKFJOnQoUNqaWnRlClTnMuGDRum8ePHu4yLjY1tt25hYaFuuOEGBQcHy8/PT48//riqqqqcjycmJqq0tFStra3On3CTkpJUUlKiY8eO6eDBg86fetE/JSYmqqSkRJZl6a9//at+/OMfKyIiQqWlpXrvvfc0cuRIjR07VpLk6+ur8PBw57pXX3216urq3Nrf3r17tXXrVvn5+TlvEyZMkPT1vwVcGGKkn6isrNSPfvQjTZo0SX/4wx+0a9cu5efnS/r6YkJJGjhw4Hm3c/3112vEiBEqKCjo9NoUoCODBg1yuV9WVqZ58+bplltu0VtvvaXdu3frsccecz4fJWnGjBlqbGxURUWF3n//fZcY+fY3GvRPSUlJKi0t1d69ezVgwABNmDDB5Tly7g9NAwYMcFnXZrO5/XXsiy++0K233qo9e/a43D777DPNmDGjR46pPyJG+rjt27e73P/www81duxYeXp6uizftWuX2tratGrVKk2dOlXjxo3TsWPHXMZMmjRJxcXFXe4vPDxcW7du1Ztvvqlf/OIXPXMQ6NPCw8M1YMAAl+fiyZMn9emnn3a53gcffKBRo0bpscceU2xsrMaOHavDhw+7jBkyZIgmTZqk5557zvmNZsaMGdq9e7feeustzs7Bed3I6tWrnc+HszFSUlJyUWfOvL29251lnjx5svbv36+wsDCNGTPG5fbt4Eb3ESN9XFVVlRwOhw4cOKDf/va3evbZZ7Vo0aJ248aMGaMzZ87o2Wef1d///ndt3LhRa9eudRmTlZWlHTt2aOHChfrb3/6mTz75RM8//3y7q8THjRunrVu36g9/+AMfagX5+fnpnnvu0UMPPaQtW7Zo3759mj9/vjw8uv7yMnbsWFVVVWnz5s06dOiQnnnmGb3++uvtxiUlJWnTpk3ObzTDhg1TRESECgsLiRFo6NChmjRpkjZt2uQMjxkzZqiiokKffvrpRT1HwsLCtH37dlVWVurEiRNqa2vTvffeq3/961+aO3euduzYoUOHDundd99Venp6hy+Po3uIkT4uNTVVX375peLi4nTvvfdq0aJFzrdNnisqKkq5ubn69a9/rYkTJ2rTpk3KyclxGTNu3Dj96U9/0t69exUXF6f4+Hi9+eab8vJq/3E048eP15YtW/Tb3/5WDzzwQK8dH/qGFStWaPr06br11luVnJysadOmKSYmpst1brvtNi1evFiZmZmKjo7WBx984HyXzbkSExPV2trq8hNuUlJSu2Xov779HBk2bJgiIyMVHBzc7toldzz44IPy9PRUZGSkRowYoaqqKo0cOVLbtm1Ta2urZs6cqeuuu07333+/hgwZct4AR+dsFi/8AwAAg8g4AABgFDECAACMIkYAAIBRxAgAADCKGAEAAEYRIwAAwChiBAAAGEWMAAAAo4gRAABgFDECAACMIkYAAIBR/w9P3TqUzDRJUgAAAABJRU5ErkJggg==\n", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [] - }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Curating Q-Learing requirements\n", + "## Creating Q-Learning Policies\n", + "Q-learning is a classic reinforcement learning technique. The Q-function is an action-value function that returns the expected value of an action in a given state.\n", + "\n", + "$Q^\\pi(s_t,a_t)=\\sum^{60}_{t=turn}\\gamma^{60-t} \\cdot R_t$\n", + "\n", + "With this function, all actions in a given state can be evaluated, and the most beneficial action can be taken. With classical reinforcement learning, a table for situations and actions is explored and slowly filled. With ANNs, there is the possibility to use an AI model that can interpolate between situations and should not need to explore the complete game tree to solve some situations.\n", + "\n", "### Calculating discount tables\n", "\n", - "Since the a game stack is contains all steps even if no action is possible this needs to be corrected.\n", - "The normal formula for a reword is:\n", + "Since the game stack contains all steps, even if no action is possible, this needs to be corrected. The normal formula for a reward is:\n", "\n", - "$E(s_{turn},a_{turn}) = \\prod_{t=turn}^{70}\\gamma_t$\n", + "$E(s_{turn},a_{turn}) = \\sum^{60}_{t=turn}\\gamma^{60-t} \\cdot R_t$\n", "\n", - "Since turns that can't be taken do not have the element of uncertanty the discountation has to be excluded by setting the value to $1$ instead of $\\gamma$.\n", + "Since turns that can't be taken do not have the element of uncertainty, the discounting has to be excluded by setting the value to $1$ instead of $\\gamma$.\n", "\n", - "$q_t =\\begin{cases}1 & |a_t|=0\\\\\\gamma & |a_t|>0\\end{cases}$\n", + "$\\gamma^*_t =\\begin{cases}1 & |a_t|=0\\\\gamma & |a_t|>0\\end{cases}$\n", "\n", - "$E(s_{turn},a_{turn}) = \\prod_{t=turn}^{70}q_t$\n", + "$E(s_{turn},a_{turn}) = \\prod_{t=turn}^{70}\\gamma^*_t \\cdot R_t$\n", "\n", - "The table below contains the aggregated discount factors for each reword fitting to the state history." + "The table below contains the aggregated discount factors ($\\prod_{t=turn}^{70}\\gamma^*_t$) for each reward fitting to the state history. This setup also allows to reward the certainty gained by taking the choice of the action from the opponent. It can be argued that also all turns where a player had no choice how to act should not be discounted. But this will increase calculation requirements to nearly double, which is currently not acceptable since computation time and code complexity are bottlenecks." ] }, { @@ -2447,7 +2448,7 @@ "source": [ "def get_gamma_table(board_history: np.ndarray, gamma_value: float) -> np.ndarray:\n", " \"\"\"Calculates a discount table for a board history.\n", - " \n", + "\n", " Args:\n", " board_history: A history of game boards. 
@@ -2447,7 +2448,7 @@ "source": [
     "def get_gamma_table(board_history: np.ndarray, gamma_value: float) -> np.ndarray:\n",
     "    \"\"\"Calculates a discount table for a board history.\n",
-    "    \n",
+    "\n",
     "    Args:\n",
     "        board_history: A history of game boards. Shaped (70 * n * 8 * 8)\n",
     "        gamma_value: The default discount factor.\n",
@@ -2505,14 +2506,14 @@
 "def calculate_q_reword(\n",
     "    board_history: np.ndarray,\n",
     "    who_won_fraction: float = 0.2,\n",
-    "    final_score_fraction: float=0.2,\n",
-    "    gamma: float=0.8,\n",
+    "    final_score_fraction: float = 0.2,\n",
+    "    gamma: float = 0.8,\n",
     ") -> np.ndarray:\n",
-    "    \"\"\"\n",
-    "    \n",
+    "    \"\"\"Calculates the discounted Q reward for a stack of game histories.\n",
+    "\n",
     "    Args:\n",
-    "        board_history: \n",
-    "        who_won_fraction: \n",
-    "        final_score_fraction:\n",
-    "        gamma:\n",
+    "        board_history: A history of game boards. Shaped (70 * n * 8 * 8)\n",
+    "        who_won_fraction: The fraction of the reward taken from who won the game.\n",
+    "        final_score_fraction: The fraction of the reward taken from the final score.\n",
+    "        gamma: The default discount factor.\n",
     "    \"\"\"\n",
@@ -3761,19 +3762,21 @@
   ]
  },
 {
-  "cell_type": "raw",
-  "metadata": {
-   "tags": []
-  },
+  "cell_type": "code",
+  "execution_count": null,
+  "outputs": [],
   "source": [
     "probes: int = 1000\n",
     "_ = (\n",
-    "    calculate_board_branching(simulate_game(probes, (ql_policy, ql_policy))[0]) / probes\n",
+    "    calculate_board_branching(simulate_game(probes, (ql_policy1, ql_policy1))[0]) / probes\n",
     ").plot(\n",
     "    ylim=(0, 1),\n",
-    "    title=f\"Branching rate for a QL policy with epsilon={ql_policy.epsilon}\",\n",
+    "    title=f\"Branching rate for a QL policy with epsilon={ql_policy1.epsilon}\",\n",
     ")"
-  ]
+  ],
+  "metadata": {
+   "collapsed": false
+  }
 },
 {
  "cell_type": "code",
@@ -3815,240 +3818,11 @@
    }
   ],
   "source": [
+    "constant_metric_policies = [RandomPolicy(0), GreedyPolicy(0)]\n",
     "for i in range(100):\n",
     "    for ql_policy in ql_policys:\n",
     "        ql_policy.load()\n",
-    "        ql_policy.train(1, 10, 1000, 250, [RandomPolicy(0), GreedyPolicy(0)])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "pycharm": {
-     "is_executing": true
-    },
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "ql_policy.load()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "pycharm": {
-     "is_executing": true
-    },
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "ql_policy.plot_history()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "pycharm": {
-     "is_executing": true
-    },
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "ql_policy.train(100, 10, 1000, 250, [RandomPolicy(0), GreedyPolicy(0)])"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "pycharm": {
-     "is_executing": true
-    },
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "_boards_greedy_l, _action_greedy_l = simulate_game(\n",
-    "    500, (RandomPolicy(0), GreedyPolicy(0)), tqdm_on=True\n",
-    ")\n",
-    "_boards_greedy_r, _action_greedy_r = simulate_game(\n",
-    "    500, (GreedyPolicy(0), RandomPolicy(0)), tqdm_on=True\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false,
-    "jupyter": {
-     "outputs_hidden": false
-    },
-    "pycharm": {
-     "is_executing": true
-    }
-   },
-   "outputs": [],
-   "source": [
-    "_boards_greedy_r.shape"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "pycharm": {
-     "is_executing": true
-    },
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "np.sum(_boards_greedy_l[-1]) / 500, -np.sum(_boards_greedy_r[-1]) / 500"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "pycharm": {
-     "is_executing": true
-    },
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "ql_policy._epsilon = 1\n",
-    "_boards_l, _actions_l = simulate_game(500, (RandomPolicy(0), ql_policy), tqdm_on=True)\n",
-    "_boards_r, _actions_r = simulate_game(500, (ql_policy, RandomPolicy(0)), tqdm_on=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false,
"jupyter": { - "outputs_hidden": false - }, - "pycharm": { - "is_executing": true - } - }, - "outputs": [], - "source": [ - "_boards_l.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "is_executing": true - }, - "tags": [] - }, - "outputs": [], - "source": [ - "np.sum(_boards_l[-1]) / 500, -np.sum(_boards_r[-1]) / 500" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - }, - "pycharm": { - "is_executing": true - } - }, - "outputs": [], - "source": [ - "_boards_policy_l, _ = simulate_game(500, (ql_policy, GreedyPolicy(0)), tqdm_on=True)\n", - "_boards_policy_r, _ = simulate_game(500, (GreedyPolicy(0), ql_policy), tqdm_on=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "jupyter": { - "outputs_hidden": false - }, - "pycharm": { - "is_executing": true - } - }, - "outputs": [], - "source": [ - "np.sum(_boards_policy_l[-1]) / 500, np.sum(_boards_policy_r[-1]) / 500" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "is_executing": true - }, - "tags": [] - }, - "outputs": [], - "source": [ - "calculate_final_evaluation_for_history(\n", - " _boards_policy_l\n", - ").mean() * 64, calculate_final_evaluation_for_history(_boards_policy_r).mean() * 64" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "is_executing": true - }, - "tags": [] - }, - "outputs": [], - "source": [ - "@interact(game_start=\"0\")\n", - "def plot_training(game_start: int) -> None:\n", - " boards_at_once = 12\n", - " if not game_start:\n", - " return\n", - " game_start = int(game_start)\n", - " start = game_start * boards_at_once\n", - " end = start + boards_at_once\n", - " boards_selected = _boards_l[start:end, 0]\n", - " scores_selected = _boards_r[start:end, 0]\n", - "\n", - " # noinspection PyProtectedMember\n", - " p_scores = np.max(\n", - " ql_policy._internal_policy(_boards[start:end, 0].cpu().detach().numpy()),\n", - " axis=(1, 2),\n", - " ).tolist()\n", - "\n", - " scores2 = np.array(\n", - " [\n", - " f\"Q:{float(score[0]):2e}@P:{float(score[1]):2e}\"\n", - " for score in zip(scores_selected, p_scores)\n", - " ]\n", - " )\n", - " plot_othello_boards(\n", - " boards_selected,\n", - " scores=scores2,\n", - " )" + " ql_policy.train(1, 10, 1000, 250, constant_metric_policies)" ] }, {