astha committed
Commit f8ab485 · Parent: 627413e

Upload 54 files

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. language_models/.DS_Store +0 -0
  2. language_models/gru/.DS_Store +0 -0
  3. language_models/gru/motivating example models/.DS_Store +0 -0
  4. language_models/gru/motivating example models/problem1/.DS_Store +0 -0
  5. language_models/gru/motivating example models/problem1/model_GRU_ME1_original.h5 +3 -0
  6. language_models/gru/motivating example models/problem1/model_GRU_ME1_soln1.h5 +3 -0
  7. language_models/gru/motivating example models/problem1/model_GRU_ME1_soln2.h5 +3 -0
  8. language_models/gru/motivating example models/problem2/.DS_Store +0 -0
  9. language_models/gru/motivating example models/problem2/model_GRU_ME2_original.h5 +3 -0
  10. language_models/gru/motivating example models/problem2/model_GRU_ME2_soln1.h5 +3 -0
  11. language_models/gru/motivating example models/problem2/model_GRU_ME2_soln2.h5 +3 -0
  12. language_models/gru/reuse models/model4_de_fr.h5 +3 -0
  13. language_models/gru/reuse models/model4_de_it.h5 +3 -0
  14. language_models/gru/reuse models/model4_fr_it.h5 +3 -0
  15. language_models/gru/rq1 models/.DS_Store +0 -0
  16. language_models/gru/rq1 models/model_GRU_1layer.h5 +3 -0
  17. language_models/gru/rq1 models/model_GRU_2layer.h5 +3 -0
  18. language_models/gru/rq1 models/model_GRU_3layer.h5 +3 -0
  19. language_models/gru/rq1 models/model_GRU_4layer.h5 +3 -0
  20. language_models/gru/training script/.DS_Store +0 -0
  21. language_models/gru/training script/MNMT_GRU_Experiment.ipynb +1271 -0
  22. language_models/lstm/.DS_Store +0 -0
  23. language_models/lstm/motivating example models/.DS_Store +0 -0
  24. language_models/lstm/motivating example models/problem1/original_problem1.h5 +3 -0
  25. language_models/lstm/motivating example models/problem1/solution1_problem1.h5 +3 -0
  26. language_models/lstm/motivating example models/problem1/solution2_problem1.h5 +3 -0
  27. language_models/lstm/motivating example models/problem2/original_problem2.h5 +3 -0
  28. language_models/lstm/motivating example models/problem2/solution1_problem2.h5 +3 -0
  29. language_models/lstm/motivating example models/problem2/solution2_problem2.h5 +3 -0
  30. language_models/lstm/reuse models/model4_de_fr.h5 +3 -0
  31. language_models/lstm/reuse models/model4_de_it.h5 +3 -0
  32. language_models/lstm/reuse models/model4_fr_it.h5 +3 -0
  33. language_models/lstm/rq1 models/model_LSTM_1layer.h5 +3 -0
  34. language_models/lstm/rq1 models/model_LSTM_2layer.h5 +3 -0
  35. language_models/lstm/rq1 models/model_LSTM_3layer.h5 +3 -0
  36. language_models/lstm/rq1 models/model_LSTM_4layer.h5 +3 -0
  37. language_models/lstm/training script/(LSTM)_NMT_Experiment.ipynb +1317 -0
  38. language_models/lstm/training script/.DS_Store +0 -0
  39. language_models/vanilla_rnn/motivating example models/.DS_Store +0 -0
  40. language_models/vanilla_rnn/motivating example models/problem1/original_problem1.h5 +3 -0
  41. language_models/vanilla_rnn/motivating example models/problem1/solution1_problem1.h5 +3 -0
  42. language_models/vanilla_rnn/motivating example models/problem1/solution2_problem1.h5 +3 -0
  43. language_models/vanilla_rnn/motivating example models/problem2/original_problem2.h5 +3 -0
  44. language_models/vanilla_rnn/motivating example models/problem2/solution1_problem2.h5 +3 -0
  45. language_models/vanilla_rnn/motivating example models/problem2/solution2_problem2.h5 +3 -0
  46. language_models/vanilla_rnn/reuse models/model4_de_fr.h5 +3 -0
  47. language_models/vanilla_rnn/reuse models/model4_de_it.h5 +3 -0
  48. language_models/vanilla_rnn/reuse models/model4_fr_it.h5 +3 -0
  49. language_models/vanilla_rnn/rq1 models/model1.h5 +3 -0
  50. language_models/vanilla_rnn/rq1 models/model2.h5 +3 -0
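
Most of the files listed above are Keras HDF5 checkpoints tracked with Git LFS, which is why their diffs below each add the same three pointer lines. As a rough orientation only, the sketch below shows one way such a checkpoint could be fetched from the Hub and loaded; the repo_id is a placeholder (not taken from this commit), the filename is copied from entry 16 of the list, and it assumes the model uses only built-in Keras layers, as in the committed training script.

# Hedged sketch: download one committed checkpoint and load it with Keras.
# ASSUMPTIONS: `repo_id` is hypothetical; substitute the real Hub repository.
from huggingface_hub import hf_hub_download
from tensorflow import keras

weights_path = hf_hub_download(
    repo_id="astha/MNMT-models",  # placeholder repo id, not part of this commit
    filename="language_models/gru/rq1 models/model_GRU_1layer.h5",
)
model = keras.models.load_model(weights_path)  # expects only built-in Keras layers
model.summary()

Note that loading this way only restores the network weights; the TextVectorization preprocessing used in the training notebooks is adapted separately in those scripts and is not part of the saved .h5 files.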
language_models/.DS_Store ADDED
Binary file (6.15 kB).
 
language_models/gru/.DS_Store ADDED
Binary file (10.2 kB).
 
language_models/gru/motivating example models/.DS_Store ADDED
Binary file (6.15 kB).
 
language_models/gru/motivating example models/problem1/.DS_Store ADDED
Binary file (6.15 kB).
 
language_models/gru/motivating example models/problem1/model_GRU_ME1_original.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:835c895c1ffe67e6ad103f31397609de12a0b8ce0a1e724c78f7f01804a151c6
+ size 529368600
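
Each .h5 diff in this commit adds the same three lines because the binaries themselves live in Git LFS: the pointer records the spec version, a sha256 oid, and the payload size in bytes (here roughly 529 MB). As a hedged illustration of how those fields can be used, the following sketch parses a pointer file and checks a locally downloaded artifact against it; both file paths are hypothetical.

# Minimal sketch, assuming hypothetical local paths: read the three fields of a
# Git LFS pointer file and verify a downloaded artifact against them.
import hashlib

def parse_lfs_pointer(pointer_path):
    """Return (sha256 hex digest, size in bytes) recorded in an LFS pointer."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    return oid, int(fields["size"])

def verify_artifact(artifact_path, oid, size):
    """Stream the file, compare its sha256 and byte count to the pointer."""
    digest, total = hashlib.sha256(), 0
    with open(artifact_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return digest.hexdigest() == oid and total == size

oid, size = parse_lfs_pointer("model_GRU_ME1_original.h5.pointer")  # hypothetical path
print(verify_artifact("model_GRU_ME1_original.h5", oid, size))      # hypothetical path

In a normal clone, `git lfs pull` replaces the pointers with the real files, so a check like this is only needed when artifacts are fetched out of band.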
language_models/gru/motivating example models/problem1/model_GRU_ME1_soln1.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b15628a07a04a0f825a4809fb2e45fa89a3f242d49b11b9f5b2916eea8944d1
+ size 529368608
language_models/gru/motivating example models/problem1/model_GRU_ME1_soln2.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca0f245c0351b15bbc4c70dbcaf8d2fb18016ef4af717ea795c994b320e8a1b7
+ size 529368600
language_models/gru/motivating example models/problem2/.DS_Store ADDED
Binary file (6.15 kB).
 
language_models/gru/motivating example models/problem2/model_GRU_ME2_original.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:419ae2ca52e6460016a7227ad98a2a2779f640d48e48d973934d678b18a42bfb
+ size 232568608
language_models/gru/motivating example models/problem2/model_GRU_ME2_soln1.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0553c417cdcf67805e5b31e6b70fc8a2bedd8b556e29bbda038457df06c8bf95
+ size 529368600
language_models/gru/motivating example models/problem2/model_GRU_ME2_soln2.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66e66c458c6044d1a390fbcfd7d9359f6bf8bea6cc649ba8251906818378ab91
+ size 232568600
language_models/gru/reuse models/model4_de_fr.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cade5c3c03557d71ec2ce3b1a73c0ad4705b7d47549582d243516fd223e13f31
+ size 529368600
language_models/gru/reuse models/model4_de_it.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:377e7812d71a8d177e65f7b8c0adca4811b4433a493ad60a27c118560cd34f7c
+ size 529368600
language_models/gru/reuse models/model4_fr_it.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba7110430b5882b123ca948f7662b15590e79557b5062b60f57f443e6b266790
+ size 529368608
language_models/gru/rq1 models/.DS_Store ADDED
Binary file (6.15 kB).
 
language_models/gru/rq1 models/model_GRU_1layer.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c0393a7878c072feef7df4e54a1bec8661931324509b748d1cbac7890907e38
+ size 378266768
language_models/gru/rq1 models/model_GRU_2layer.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cc5ac4f8836ad5bb2019f385e6474805e0acf066e52084bddca952097c650f1
+ size 428633280
language_models/gru/rq1 models/model_GRU_3layer.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6614568323b11b536b05353909c1d1179700aa0fedb93c497876aa6feb773c0
+ size 479002056
language_models/gru/rq1 models/model_GRU_4layer.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77b4fa3b332d39bbd9bd1e2b93a9f92f6f1450b3d0e4fd8774396f32a3509711
+ size 529368600
language_models/gru/training script/.DS_Store ADDED
Binary file (6.15 kB).
 
language_models/gru/training script/MNMT_GRU_Experiment.ipynb ADDED
@@ -0,0 +1,1271 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "colab": {
8
+ "base_uri": "https://localhost:8080/"
9
+ },
10
+ "id": "SIK13ZpDhF_n",
11
+ "outputId": "04cc436b-3b58-4cfa-bd09-b683b7dd6218"
12
+ },
13
+ "outputs": [
14
+ {
15
+ "name": "stdout",
16
+ "output_type": "stream",
17
+ "text": [
18
+ "Mounted at /content/drive\n"
19
+ ]
20
+ }
21
+ ],
22
+ "source": [
23
+ "#connect drive\n",
24
+ "from google.colab import drive\n",
25
+ "drive.mount('/content/drive', force_remount=True)"
26
+ ]
27
+ },
28
+ {
29
+ "cell_type": "code",
30
+ "execution_count": null,
31
+ "metadata": {
32
+ "id": "CB3hzllVhKVC"
33
+ },
34
+ "outputs": [],
35
+ "source": [
36
+ "import csv\n",
37
+ "import os\n",
38
+ "import numpy as np\n",
39
+ "from keras.preprocessing.text import Tokenizer\n",
40
+ "import pandas as pd\n",
41
+ "import tensorflow as tf\n",
42
+ "from keras.utils.np_utils import to_categorical\n",
43
+ "from keras_preprocessing.sequence import pad_sequences\n",
44
+ "import tensorflow_datasets as tfds\n",
45
+ "from nltk.translate.bleu_score import corpus_bleu\n",
46
+ "import os\n",
47
+ "from keras.callbacks import ReduceLROnPlateau\n",
48
+ "from keras.callbacks import ModelCheckpoint\n"
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "code",
53
+ "execution_count": null,
54
+ "metadata": {
55
+ "id": "OdmG4c70hxQs"
56
+ },
57
+ "outputs": [],
58
+ "source": [
59
+ "def load_data(path):\n",
60
+ " \"\"\"\n",
61
+ " Load dataset\n",
62
+ " \"\"\"\n",
63
+ " input_file = os.path.join(path)\n",
64
+ " with open(input_file, \"r\") as f:\n",
65
+ " data = f.read()\n",
66
+ "\n",
67
+ " return data.split('\\n')\n",
68
+ "\n",
69
+ "def load_glob_embedding(num_words, embed_size=100, word_index=None):\n",
70
+ " from numpy import asarray\n",
71
+ " from numpy import zeros\n",
72
+ "\n",
73
+ " embeddings_dictionary = dict()\n",
74
+ " glove_file = open('/content/drive/MyDrive/NMT/glove.6B.'+str(embed_size)+'d.txt', encoding=\"utf8\")\n",
75
+ "\n",
76
+ " for line in glove_file:\n",
77
+ " records = line.split()\n",
78
+ " word = records[0]\n",
79
+ " vector_dimensions = asarray(records[1:], dtype='float32')\n",
80
+ " embeddings_dictionary[word] = vector_dimensions\n",
81
+ " glove_file.close()\n",
82
+ "\n",
83
+ " embedding_matrix = zeros((num_words, embed_size))\n",
84
+ " for index, word in enumerate(word_index):\n",
85
+ " embedding_vector = embeddings_dictionary.get(word)\n",
86
+ " if embedding_vector is not None:\n",
87
+ " embedding_matrix[index] = embedding_vector\n",
88
+ "\n",
89
+ " return embedding_matrix\n",
90
+ "\n"
91
+ ]
92
+ },
93
+ {
94
+ "cell_type": "code",
95
+ "execution_count": null,
96
+ "metadata": {
97
+ "id": "oTEQAFkOK8B4"
98
+ },
99
+ "outputs": [],
100
+ "source": [
101
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/en_train.txt')\n",
102
+ "french_sentences = load_data('/content/drive/MyDrive/NMT/fr_train.txt')\n",
103
+ "german_sentences = load_data('/content/drive/MyDrive/NMT/de_train.txt')\n",
104
+ "italian_sentences = load_data('/content/drive/MyDrive/NMT/it_train.txt')"
105
+ ]
106
+ },
107
+ {
108
+ "cell_type": "markdown",
109
+ "metadata": {
110
+ "id": "jNH6q4fk63Uc"
111
+ },
112
+ "source": [
113
+ "# Run only for the original model, not for inter-reuse experiment"
114
+ ]
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "execution_count": null,
119
+ "metadata": {
120
+ "id": "b4BL1OgOiVFF"
121
+ },
122
+ "outputs": [],
123
+ "source": [
124
+ "text_pairs = []\n",
125
+ "for english,french,german,italian in zip(english_sentences, french_sentences,german_sentences,italian_sentences):\n",
126
+ " # english = \"[starten] \" + english + \" [enden]\"\n",
127
+ " french = \"[startfr] \" + french + \" [endfr]\"\n",
128
+ " german = \"[startde] \" + german + \" [endde]\"\n",
129
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
130
+ "\n",
131
+ " text_pairs.append((english, french))\n",
132
+ " text_pairs.append((english, german))\n",
133
+ " text_pairs.append((english, italian))\n",
134
+ "\n",
135
+ " # text_pairs.append((french, english))\n",
136
+ " # text_pairs.append((french, german))\n",
137
+ " # text_pairs.append((french, italian))\n",
138
+ "\n",
139
+ " # text_pairs.append((german, english))\n",
140
+ " # text_pairs.append((german, french))\n",
141
+ " # text_pairs.append((german, italian))\n",
142
+ "\n",
143
+ " # text_pairs.append((italian, english))\n",
144
+ " # text_pairs.append((italian, french))\n",
145
+ " # text_pairs.append((italian, german))\n"
146
+ ]
147
+ },
148
+ {
149
+ "cell_type": "markdown",
150
+ "metadata": {
151
+ "id": "fXgeKJYJ2F6r"
152
+ },
153
+ "source": [
154
+ "\n",
155
+ "\n",
156
+ "# InterReuse (1st scenario) En to {de, it}"
157
+ ]
158
+ },
159
+ {
160
+ "cell_type": "code",
161
+ "execution_count": null,
162
+ "metadata": {
163
+ "id": "3UsmQC_159vK"
164
+ },
165
+ "outputs": [],
166
+ "source": [
167
+ "text_pairs = []\n",
168
+ "for english,french,german,italian in zip(english_sentences, french_sentences,german_sentences,italian_sentences):\n",
169
+ " german = \"[startde] \" + german + \" [endde]\"\n",
170
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
171
+ "\n",
172
+ " text_pairs.append((english, german))\n",
173
+ " text_pairs.append((english, italian))\n",
174
+ " "
175
+ ]
176
+ },
177
+ {
178
+ "cell_type": "markdown",
179
+ "metadata": {
180
+ "id": "rKyB-Jtn2GRW"
181
+ },
182
+ "source": [
183
+ "# InterReuse (2nd scenario) En to {de, fr}"
184
+ ]
185
+ },
186
+ {
187
+ "cell_type": "code",
188
+ "execution_count": null,
189
+ "metadata": {
190
+ "id": "4NME4f-u6ee_"
191
+ },
192
+ "outputs": [],
193
+ "source": [
194
+ "text_pairs = []\n",
195
+ "for english,french,german,italian in zip(english_sentences, french_sentences,german_sentences,italian_sentences):\n",
196
+ " french = \"[startfr] \" + french + \" [endfr]\"\n",
197
+ " german = \"[startde] \" + german + \" [endde]\"\n",
198
+ " \n",
199
+ " text_pairs.append((english, french))\n",
200
+ " text_pairs.append((english, german))\n"
201
+ ]
202
+ },
203
+ {
204
+ "cell_type": "markdown",
205
+ "metadata": {
206
+ "id": "eGBLp-242GUM"
207
+ },
208
+ "source": [
209
+ "# InterReuse (3rd scenario) En to {it, fr}"
210
+ ]
211
+ },
212
+ {
213
+ "cell_type": "code",
214
+ "execution_count": null,
215
+ "metadata": {
216
+ "id": "kiES-xcO6rEf"
217
+ },
218
+ "outputs": [],
219
+ "source": [
220
+ "text_pairs = []\n",
221
+ "for english,french,german,italian in zip(english_sentences, french_sentences,german_sentences,italian_sentences):\n",
222
+ " french = \"[startfr] \" + french + \" [endfr]\"\n",
223
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
224
+ "\n",
225
+ " text_pairs.append((english, french))\n",
226
+ " text_pairs.append((english, italian))\n",
227
+ " "
228
+ ]
229
+ },
230
+ {
231
+ "cell_type": "markdown",
232
+ "metadata": {
233
+ "id": "-bP39N4viCAV"
234
+ },
235
+ "source": [
236
+ "# Motivating Example 1 Original Model"
237
+ ]
238
+ },
239
+ {
240
+ "cell_type": "code",
241
+ "execution_count": null,
242
+ "metadata": {
243
+ "id": "VcIvoR2LiCoN"
244
+ },
245
+ "outputs": [],
246
+ "source": [
247
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/en_ua_train_original.txt')\n",
248
+ "french_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/fr_ua_train_original.txt')\n",
249
+ "german_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/de_ua_train_original.txt')\n",
250
+ "ukranian_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/ua_ua_train_original.txt')\n",
251
+ "italian_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/it_ua_train_original.txt')\n",
252
+ "\n",
253
+ "text_pairs = []\n",
254
+ "for english,french,german,italian,ukranian in zip(english_sentences, french_sentences,german_sentences,italian_sentences,ukranian_sentences):\n",
255
+ " french = \"[startfr] \" + french + \" [endfr]\"\n",
256
+ " german = \"[startde] \" + german + \" [endde]\"\n",
257
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
258
+ " ukranian = \"[startua] \" + ukranian + \" [endua]\"\n",
259
+ "\n",
260
+ " text_pairs.append((english, french))\n",
261
+ " text_pairs.append((english, german))\n",
262
+ " text_pairs.append((english, italian))\n",
263
+ " text_pairs.append((english, ukranian))"
264
+ ]
265
+ },
266
+ {
267
+ "cell_type": "markdown",
268
+ "metadata": {
269
+ "id": "xFmSR42wE1GY"
270
+ },
271
+ "source": [
272
+ "# Motivating Example 1 Solution 1"
273
+ ]
274
+ },
275
+ {
276
+ "cell_type": "code",
277
+ "execution_count": null,
278
+ "metadata": {
279
+ "id": "ENunZM3FE23o"
280
+ },
281
+ "outputs": [],
282
+ "source": [
283
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/en_ua_train.txt')\n",
284
+ "german_sentences = load_data('/content/drive/MyDrive/NMT/de_ua_train.txt')\n",
285
+ "ukranian_sentences = load_data('/content/drive/MyDrive/NMT/ua_train.txt')\n",
286
+ "\n",
287
+ "text_pairs = []\n",
288
+ "for english, german, ukranian in zip(english_sentences,german_sentences, ukranian_sentences):\n",
289
+ " german = \"[startde] \" + german + \" [endde]\"\n",
290
+ " ukranian = \"[startua] \" + ukranian + \" [endua]\"\n",
291
+ "\n",
292
+ " text_pairs.append((english, german))\n",
293
+ " text_pairs.append((english, ukranian))"
294
+ ]
295
+ },
296
+ {
297
+ "cell_type": "markdown",
298
+ "metadata": {
299
+ "id": "IUnNMpAVDpZG"
300
+ },
301
+ "source": [
302
+ "# Motivating Example 1 Solution 2"
303
+ ]
304
+ },
305
+ {
306
+ "cell_type": "code",
307
+ "execution_count": null,
308
+ "metadata": {
309
+ "id": "T5-vlFmeDrtQ"
310
+ },
311
+ "outputs": [],
312
+ "source": [
313
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/en_ua_train.txt')\n",
314
+ "ukranian_sentences = load_data('/content/drive/MyDrive/NMT/ua_train.txt')\n",
315
+ "\n",
316
+ "text_pairs = []\n",
317
+ "for english,ukranian in zip(english_sentences, ukranian_sentences):\n",
318
+ " ukranian = \"[startua] \" + ukranian + \" [endua]\"\n",
319
+ "\n",
320
+ " text_pairs.append((english, ukranian))"
321
+ ]
322
+ },
323
+ {
324
+ "cell_type": "markdown",
325
+ "metadata": {
326
+ "id": "VtPspyGHKpYj"
327
+ },
328
+ "source": [
329
+ "# Motivating Example 2 Original\n",
330
+ "\n"
331
+ ]
332
+ },
333
+ {
334
+ "cell_type": "code",
335
+ "execution_count": null,
336
+ "metadata": {
337
+ "id": "BkgEgbc6Kteo"
338
+ },
339
+ "outputs": [],
340
+ "source": [
341
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/en_et_train.txt')\n",
342
+ "german_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/de_et_train.txt')\n",
343
+ "italian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/it_et_train.txt')\n",
344
+ "estonian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/et_et_train.txt')\n",
345
+ "text_pairs = []\n",
346
+ "for english, german, italian, estonian in zip(english_sentences,german_sentences, italian_sentences,estonian_sentences):\n",
347
+ " english = \"[starten] \" + english + \" [enden]\"\n",
348
+ " german = \"[startde] \" + german + \" [endde]\"\n",
349
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
350
+ "\n",
351
+ " text_pairs.append((estonian, english))\n",
352
+ " text_pairs.append((estonian, german))\n",
353
+ " text_pairs.append((estonian, italian))"
354
+ ]
355
+ },
356
+ {
357
+ "cell_type": "markdown",
358
+ "metadata": {
359
+ "id": "bUyLIVUHn0Xu"
360
+ },
361
+ "source": [
362
+ "# Motivating Example 2 Solution 1\n"
363
+ ]
364
+ },
365
+ {
366
+ "cell_type": "code",
367
+ "execution_count": null,
368
+ "metadata": {
369
+ "id": "9-0JKeqWn4YW"
370
+ },
371
+ "outputs": [],
372
+ "source": [
373
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/en_et_train.txt')\n",
374
+ "italian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/it_et_train.txt')\n",
375
+ "estonian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/et_et_train.txt')\n",
376
+ "\n",
377
+ "text_pairs = []\n",
378
+ "for english, italian, estonian in zip(english_sentences, italian_sentences,estonian_sentences):\n",
379
+ " english = \"[starten] \" + english + \" [enden]\"\n",
380
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
381
+ "\n",
382
+ " text_pairs.append((estonian, english))\n",
383
+ " text_pairs.append((estonian, italian))"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "markdown",
388
+ "metadata": {
389
+ "id": "MI7Ti6Vnkf3v"
390
+ },
391
+ "source": [
392
+ "# Motivating Example 2 Solution 2\n"
393
+ ]
394
+ },
395
+ {
396
+ "cell_type": "code",
397
+ "execution_count": null,
398
+ "metadata": {
399
+ "id": "rxssKJSUkkJP"
400
+ },
401
+ "outputs": [],
402
+ "source": [
403
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/s2/en_et_train_s2.txt')\n",
404
+ "estonian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/s2/et_et_train_s2.txt')\n",
405
+ "text_pairs = []\n",
406
+ "for english, estonian in zip(english_sentences,estonian_sentences):\n",
407
+ " english = \"[starten] \" + english + \" [enden]\"\n",
408
+ "\n",
409
+ " text_pairs.append((estonian, english))"
410
+ ]
411
+ },
412
+ {
413
+ "cell_type": "markdown",
414
+ "metadata": {
415
+ "id": "w8044H00hfK-"
416
+ },
417
+ "source": [
418
+ "# Building the model"
419
+ ]
420
+ },
421
+ {
422
+ "cell_type": "code",
423
+ "execution_count": null,
424
+ "metadata": {
425
+ "colab": {
426
+ "base_uri": "https://localhost:8080/"
427
+ },
428
+ "id": "mrG_3rC8h01E",
429
+ "outputId": "70effc3f-048f-46de-a994-7ddb8d58a249"
430
+ },
431
+ "outputs": [
432
+ {
433
+ "output_type": "stream",
434
+ "name": "stdout",
435
+ "text": [
436
+ "('Ma võtan vihmavarju kaasa.', \"[starten] I'm going to take an umbrella with me. [enden]\")\n"
437
+ ]
438
+ }
439
+ ],
440
+ "source": [
441
+ "import random\n",
442
+ "print(random.choice(text_pairs))"
443
+ ]
444
+ },
445
+ {
446
+ "cell_type": "code",
447
+ "execution_count": null,
448
+ "metadata": {
449
+ "id": "i4bKiSxSLKMI"
450
+ },
451
+ "outputs": [],
452
+ "source": [
453
+ "import random\n",
454
+ "random.shuffle(text_pairs)\n",
455
+ "num_val_samples = int(0.15 * len(text_pairs))\n",
456
+ "num_train_samples = len(text_pairs) - 2 * num_val_samples\n",
457
+ "train_pairs = text_pairs[:num_train_samples]\n",
458
+ "val_pairs = text_pairs[num_train_samples:num_train_samples + num_val_samples]\n",
459
+ "test_pairs = text_pairs[num_train_samples + num_val_samples:]"
460
+ ]
461
+ },
462
+ {
463
+ "cell_type": "code",
464
+ "execution_count": null,
465
+ "metadata": {
466
+ "id": "CB-X3EfULMec"
467
+ },
468
+ "outputs": [],
469
+ "source": [
470
+ "import tensorflow as tf\n",
471
+ "import string\n",
472
+ "import re\n",
473
+ "from tensorflow.keras import layers\n",
474
+ "\n",
475
+ "strip_chars = string.punctuation + \"¿\"\n",
476
+ "strip_chars = strip_chars.replace(\"[\", \"\")\n",
477
+ "strip_chars = strip_chars.replace(\"]\", \"\")\n",
478
+ "\n",
479
+ "def custom_standardization(input_string):\n",
480
+ " lowercase = tf.strings.lower(input_string)\n",
481
+ " return tf.strings.regex_replace(\n",
482
+ " lowercase, f\"[{re.escape(strip_chars)}]\", \"\")\n",
483
+ "\n",
484
+ "vocab_size = 2000\n",
485
+ "# en_vocab_size = 11000\n",
486
+ "# fr_vocab_size = 18000\n",
487
+ "sequence_length = 20\n",
488
+ "\n",
489
+ "source_vectorization = layers.TextVectorization(\n",
490
+ " max_tokens=vocab_size,\n",
491
+ " output_mode=\"int\",\n",
492
+ " output_sequence_length=sequence_length,\n",
493
+ ")\n",
494
+ "target_vectorization = layers.TextVectorization(\n",
495
+ " max_tokens=vocab_size,\n",
496
+ " output_mode=\"int\",\n",
497
+ " output_sequence_length=sequence_length + 1,\n",
498
+ " standardize=custom_standardization,\n",
499
+ ")\n",
500
+ "train_source_texts = [pair[0] for pair in train_pairs]\n",
501
+ "train_target_texts = [pair[1] for pair in train_pairs]\n",
502
+ "source_vectorization.adapt(train_source_texts)\n",
503
+ "target_vectorization.adapt(train_target_texts)\n"
504
+ ]
505
+ },
506
+ {
507
+ "cell_type": "code",
508
+ "execution_count": null,
509
+ "metadata": {
510
+ "id": "IhrYWMyHLPfr"
511
+ },
512
+ "outputs": [],
513
+ "source": [
514
+ "batch_size = 64\n",
515
+ "\n",
516
+ "def format_dataset(eng, spa):\n",
517
+ " eng = source_vectorization(eng)\n",
518
+ " spa = target_vectorization(spa)\n",
519
+ " return ({\n",
520
+ " \"source\": eng,\n",
521
+ " \"target\": spa[:, :-1],\n",
522
+ " }, spa[:, 1:])\n",
523
+ "\n",
524
+ "def make_dataset(pairs):\n",
525
+ " eng_texts, spa_texts = zip(*pairs)\n",
526
+ " eng_texts = list(eng_texts)\n",
527
+ " spa_texts = list(spa_texts)\n",
528
+ " dataset = tf.data.Dataset.from_tensor_slices((eng_texts, spa_texts))\n",
529
+ " dataset = dataset.batch(batch_size)\n",
530
+ " dataset = dataset.map(format_dataset, num_parallel_calls=4)\n",
531
+ " return dataset.shuffle(2048).prefetch(16).cache()\n",
532
+ "\n",
533
+ "train_ds = make_dataset(train_pairs)\n",
534
+ "val_ds = make_dataset(val_pairs)"
535
+ ]
536
+ },
537
+ {
538
+ "cell_type": "code",
539
+ "execution_count": null,
540
+ "metadata": {
541
+ "colab": {
542
+ "base_uri": "https://localhost:8080/"
543
+ },
544
+ "id": "tFFNByKXLRtV",
545
+ "outputId": "2e9a59bd-1a4d-4a67-c3f0-6ee124c81df3"
546
+ },
547
+ "outputs": [
548
+ {
549
+ "output_type": "stream",
550
+ "name": "stdout",
551
+ "text": [
552
+ "inputs['source'].shape: (64, 20)\n",
553
+ "inputs['target'].shape: (64, 20)\n",
554
+ "targets.shape: (64, 20)\n"
555
+ ]
556
+ }
557
+ ],
558
+ "source": [
559
+ "for inputs, targets in train_ds.take(1):\n",
560
+ " print(f\"inputs['source'].shape: {inputs['source'].shape}\")\n",
561
+ " print(f\"inputs['target'].shape: {inputs['target'].shape}\")\n",
562
+ " print(f\"targets.shape: {targets.shape}\")"
563
+ ]
564
+ },
565
+ {
566
+ "cell_type": "code",
567
+ "execution_count": null,
568
+ "metadata": {
569
+ "id": "kawQLBPHLTmr"
570
+ },
571
+ "outputs": [],
572
+ "source": [
573
+ "from tensorflow import keras\n",
574
+ "from tensorflow.keras import layers\n",
575
+ "\n",
576
+ "embed_dim = 200\n",
577
+ "latent_dim = 1024\n",
578
+ "\n",
579
+ "embedding_matrix = load_glob_embedding(vocab_size, 200, target_vectorization.get_vocabulary())\n"
580
+ ]
581
+ },
582
+ {
583
+ "cell_type": "code",
584
+ "execution_count": null,
585
+ "metadata": {
586
+ "colab": {
587
+ "base_uri": "https://localhost:8080/"
588
+ },
589
+ "id": "HUxTSb5ELV_S",
590
+ "outputId": "c8467ef6-02b2-4fa5-d52e-91c5d27bb637"
591
+ },
592
+ "outputs": [
593
+ {
594
+ "output_type": "stream",
595
+ "name": "stdout",
596
+ "text": [
597
+ "WARNING:tensorflow:Layer rnn_encoder1 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n",
598
+ "WARNING:tensorflow:Layer rnn_encoder2 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n",
599
+ "WARNING:tensorflow:Layer rnn_encoder3 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n",
600
+ "WARNING:tensorflow:Layer rnn_encoder4 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n",
601
+ "WARNING:tensorflow:Layer rnn_decoder1 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n",
602
+ "WARNING:tensorflow:Layer rnn_decoder2 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n",
603
+ "WARNING:tensorflow:Layer rnn_decoder3 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n",
604
+ "WARNING:tensorflow:Layer rnn_decoder4 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.\n"
605
+ ]
606
+ }
607
+ ],
608
+ "source": [
609
+ "source = keras.Input(shape=(None,), dtype=\"int64\", name=\"source\")\n",
610
+ "\n",
611
+ "# x = layers.Embedding(vocab_size, embed_dim, mask_zero=True)(source)\n",
612
+ "x = layers.Embedding(vocab_size, embed_dim, weights=[embedding_matrix], mask_zero=True,\n",
613
+ " name='embed_encoder', trainable=False)(source)\n",
614
+ "\n",
615
+ "encoded_source = layers.GRU(latent_dim, return_sequences=True, reset_after=False, activation='tanh', name='rnn_encoder1')(x)\n",
616
+ "encoded_source = layers.GRU(latent_dim, return_sequences=True, reset_after=False, activation='tanh', name='rnn_encoder2')(encoded_source)\n",
617
+ "encoded_source = layers.GRU(latent_dim, return_sequences=True, reset_after=False, activation='tanh', name='rnn_encoder3')(encoded_source)\n",
618
+ "encoded_source, encoder_states = layers.GRU(latent_dim, reset_after=False, return_state=True, activation='tanh', name='rnn_encoder4')(x)\n",
619
+ "\n",
620
+ "past_target = keras.Input(shape=(None,), dtype=\"int64\", name=\"target\")\n",
621
+ "x = layers.Embedding(vocab_size, embed_dim, mask_zero=True, name='embed_decoder')(past_target)\n",
622
+ "\n",
623
+ "decoder_gru = layers.GRU(latent_dim, reset_after=False, return_sequences=True, activation='tanh', name='rnn_decoder1')\n",
624
+ "x = decoder_gru(x, initial_state=encoder_states)\n",
625
+ "x = layers.GRU(latent_dim, reset_after=False, return_sequences=True, activation='tanh', name='rnn_decoder2')(x)\n",
626
+ "x = layers.GRU(latent_dim, reset_after=False, return_sequences=True, activation='tanh', name='rnn_decoder3')(x)\n",
627
+ "x = layers.GRU(latent_dim, reset_after=False, return_sequences=True, activation='tanh', name='rnn_decoder4')(x)\n",
628
+ "\n",
629
+ "x = layers.Dropout(0.5)(x)\n",
630
+ "\n",
631
+ "target_next_step = layers.TimeDistributed(layers.Dense(vocab_size, activation=\"softmax\", name='output'))(x)\n",
632
+ "\n",
633
+ "seq2seq_rnn = keras.Model([source, past_target], target_next_step)\n",
634
+ "\n",
635
+ "seq2seq_rnn.compile(\n",
636
+ " optimizer=\"rmsprop\",\n",
637
+ " loss=\"sparse_categorical_crossentropy\",\n",
638
+ " metrics=[\"accuracy\"])"
639
+ ]
640
+ },
641
+ {
642
+ "cell_type": "code",
643
+ "execution_count": null,
644
+ "metadata": {
645
+ "colab": {
646
+ "base_uri": "https://localhost:8080/"
647
+ },
648
+ "id": "twWGTTxHLX0-",
649
+ "outputId": "a994a363-7e6d-4353-a2e8-32a3421ac140"
650
+ },
651
+ "outputs": [
652
+ {
653
+ "output_type": "stream",
654
+ "name": "stdout",
655
+ "text": [
656
+ "Epoch 1/3\n",
657
+ "35/35 [==============================] - 17s 268ms/step - loss: 2.1299 - accuracy: 0.1399 - val_loss: 1.9851 - val_accuracy: 0.1586\n",
658
+ "Epoch 2/3\n",
659
+ "35/35 [==============================] - 8s 241ms/step - loss: 1.9150 - accuracy: 0.1603 - val_loss: 1.9674 - val_accuracy: 0.1561\n",
660
+ "Epoch 3/3\n",
661
+ "35/35 [==============================] - 8s 241ms/step - loss: 1.8826 - accuracy: 0.1795 - val_loss: 1.8760 - val_accuracy: 0.1873\n"
662
+ ]
663
+ }
664
+ ],
665
+ "source": [
666
+ "# checkpoint = ModelCheckpoint(filepath='model_1LSTM_original_chkpt.h5',\n",
667
+ "# monitor='val_loss', verbose=1, save_best_only=True,\n",
668
+ "# mode='min')\n",
669
+ "\n",
670
+ "seq2seq_rnn.fit(train_ds, epochs=3, validation_data=val_ds)\n",
671
+ "seq2seq_rnn.save('model_GRU_ME2_soln2.h5')"
672
+ ]
673
+ },
674
+ {
675
+ "cell_type": "code",
676
+ "execution_count": null,
677
+ "metadata": {
678
+ "colab": {
679
+ "background_save": true
680
+ },
681
+ "id": "0SkImFO6os8J",
682
+ "outputId": "c2d98cb7-90f6-484e-d3c3-7b2f094abdf7"
683
+ },
684
+ "outputs": [
685
+ {
686
+ "ename": "KeyboardInterrupt",
687
+ "evalue": "ignored",
688
+ "output_type": "error",
689
+ "traceback": [
690
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
691
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
692
+ "\u001b[0;32m<ipython-input-31-9a163dae5324>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;31m# print(\"predicted: \", decode_sequence(test_eng_texts[i],acts[0], acts[-1]))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0mactual\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0macts\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 35\u001b[0;31m \u001b[0mpredicted\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdecode_sequence\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_eng_texts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0macts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0macts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[0;31m#print(\"actual: \", actual, '\\n', \"predicted:\", predicted)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
693
+ "\u001b[0;32m<ipython-input-31-9a163dae5324>\u001b[0m in \u001b[0;36mdecode_sequence\u001b[0;34m(input_sentence, decoded_sentence, end_tag)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0mtokenized_target_sentence\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtarget_vectorization\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mdecoded_sentence\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 12\u001b[0m next_token_predictions = seq2seq_rnn.predict(\n\u001b[0;32m---> 13\u001b[0;31m [tokenized_input_sentence, tokenized_target_sentence])\n\u001b[0m\u001b[1;32m 14\u001b[0m \u001b[0msampled_token_index\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnext_token_predictions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0msampled_token\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mspa_index_lookup\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0msampled_token_index\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
694
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 63\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 64\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 65\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint: disable=broad-except\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
695
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[1;32m 1976\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mon_predict_begin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1977\u001b[0m \u001b[0mbatch_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1978\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0miterator\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menumerate_epochs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# Single epoch.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1979\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcatch_stop_iteration\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1980\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mstep\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msteps\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
696
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/engine/data_adapter.py\u001b[0m in \u001b[0;36menumerate_epochs\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1189\u001b[0m \u001b[0;34m\"\"\"Yields `(epoch, tf.data.Iterator)`.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1190\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_truncate_execution_to_epoch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1191\u001b[0;31m \u001b[0mdata_iterator\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0miter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1192\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mepoch\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_initial_epoch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_epochs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1193\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_insufficient_data\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# Set by `catch_stop_iteration`.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
697
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py\u001b[0m in \u001b[0;36m__iter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 484\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecuting_eagerly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minside_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 485\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolocate_with\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_variant_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 486\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0miterator_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOwnedIterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 487\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 488\u001b[0m raise RuntimeError(\"`tf.data.Dataset` only supports Python-style \"\n",
698
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/iterator_ops.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, dataset, components, element_spec)\u001b[0m\n\u001b[1;32m 753\u001b[0m \u001b[0;34m\"When `dataset` is provided, `element_spec` and `components` must \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 754\u001b[0m \"not be specified.\")\n\u001b[0;32m--> 755\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_create_iterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 756\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 757\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_next_call_count\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
699
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/iterator_ops.py\u001b[0m in \u001b[0;36m_create_iterator\u001b[0;34m(self, dataset)\u001b[0m\n\u001b[1;32m 785\u001b[0m \u001b[0moutput_types\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_flat_output_types\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 786\u001b[0m output_shapes=self._flat_output_shapes))\n\u001b[0;32m--> 787\u001b[0;31m \u001b[0mgen_dataset_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmake_iterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mds_variant\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_iterator_resource\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 788\u001b[0m \u001b[0;31m# Delete the resource when this object is deleted\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 789\u001b[0m self._resource_deleter = IteratorResourceDeleter(\n",
700
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/gen_dataset_ops.py\u001b[0m in \u001b[0;36mmake_iterator\u001b[0;34m(dataset, iterator, name)\u001b[0m\n\u001b[1;32m 3314\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3315\u001b[0m _result = pywrap_tfe.TFE_Py_FastPathExecute(\n\u001b[0;32m-> 3316\u001b[0;31m _ctx, \"MakeIterator\", name, dataset, iterator)\n\u001b[0m\u001b[1;32m 3317\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3318\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0m_core\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
701
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
702
+ ]
703
+ }
704
+ ],
705
+ "source": [
706
+ "import numpy as np\n",
707
+ "spa_vocab = target_vectorization.get_vocabulary()\n",
708
+ "spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))\n",
709
+ "max_decoded_sentence_length = 20\n",
710
+ "\n",
711
+ "def decode_sequence(input_sentence, decoded_sentence, end_tag):\n",
712
+ " tokenized_input_sentence = source_vectorization([input_sentence])\n",
713
+ " #decoded_sentence = \"[start]\"\n",
714
+ "\n",
715
+ " for i in range(max_decoded_sentence_length):\n",
716
+ " tokenized_target_sentence = target_vectorization([decoded_sentence])\n",
717
+ " next_token_predictions = seq2seq_rnn.predict(\n",
718
+ " [tokenized_input_sentence, tokenized_target_sentence])\n",
719
+ " sampled_token_index = np.argmax(next_token_predictions[0, i, :])\n",
720
+ " sampled_token = spa_index_lookup[sampled_token_index]\n",
721
+ " decoded_sentence += \" \" + sampled_token\n",
722
+ " if sampled_token == end_tag:\n",
723
+ " break\n",
724
+ " return decoded_sentence\n",
725
+ "\n",
726
+ "bleu_dic = {}\n",
727
+ "test_eng_texts = [pair[0] for pair in test_pairs]\n",
728
+ "test_fr_texts = [pair[1] for pair in test_pairs]\n",
729
+ "actual, predicted = [], []\n",
730
+ "for i in range(len(test_pairs)):\n",
731
+ " # input_sentence = \n",
732
+ " # target_sentence = random.choice(test_fr_texts)\n",
733
+ " acts=test_fr_texts[i].split()\n",
734
+ " # print(\"-\")\n",
735
+ " # print(acts[0], acts[-1])\n",
736
+ " # print(\"source: \", test_eng_texts[i])\n",
737
+ " # print(\"actual target: \", test_fr_texts[i])\n",
738
+ " # print(\"predicted: \", decode_sequence(test_eng_texts[i],acts[0], acts[-1]))\n",
739
+ " actual.append([acts])\n",
740
+ " predicted.append(decode_sequence(test_eng_texts[i],acts[0], acts[-1]).split())\n",
741
+ "\n",
742
+ "#print(\"actual: \", actual, '\\n', \"predicted:\", predicted)\n",
743
+ "bleu_dic['1-grams'] = corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0))\n",
744
+ "bleu_dic['1-2-grams'] = corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0))\n",
745
+ "bleu_dic['1-3-grams'] = corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0))\n",
746
+ "bleu_dic['1-4-grams'] = corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25))\n",
747
+ "print(bleu_dic)"
748
+ ]
749
+ },
750
+ {
751
+ "cell_type": "code",
752
+ "execution_count": null,
753
+ "metadata": {
754
+ "colab": {
755
+ "base_uri": "https://localhost:8080/",
756
+ "height": 281
757
+ },
758
+ "id": "D_Qf1ZnRNual",
759
+ "outputId": "ef5196ee-1377-4041-cc41-1c0fdd71a911"
760
+ },
761
+ "outputs": [
762
+ {
763
+ "data": {
764
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEICAYAAABPgw/pAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAWk0lEQVR4nO3de9RddX3n8feHBESFQguxIwEBK2ojrbcITHEp9daACq6OtVB1tEtlnFUcOlotXqqIugo6te04OIq3oFYQQW3EKGoFL4xowt2A1BiiCVoIGBC8gIHv/LF35PDwnDznyXNy+z3v11pnZV9+e+/f+Z1zPs/ev31JqgpJ0o5vp21dAUnSeBjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNA1KyRZkeSITcy/KMnLZ7D+k5N8fHOX31qSvDDJl7Z1PbRlGOjbuSSrk/wyyR1J1if5fJL9BuYvTvL2IctWkp/3y258vW7YckkO6JeZO2R9xyS5IsnPktyc5KtJDhzn+91SquoxVXURzDx8kxyRZO3YKnf/9Q/9TKe5nvt9nlX1L1X1rJmue1ym+s5pegz0HcNzq2o34KHAjcB7prHsY6tqt4HXOzenAkkeAXwUeA2wB3AgcDpw9+asb8g2ksTvpLSZ/PHsQKrqV8C5wIJtsPnHAddX1b9V5/aqOq+qfgSQZE6SNyT5QZLbk1y68UgiyR8lWZbktv7fP9q40r6r4x1JLgZ+ATw8yaOTfDnJT5Ncl+QFk1UoyR8nuXpg/MtJlg2MfyPJ8/rh1UmekWQR8Abgz/sjlisHVrl/kov7+n8pyd6TbPPBwBeAfQaOevbpZ++S5KP98iuSLBxYbp8k5yVZl+T6JP9jyHs6Hngh8Lp+3Z+bavkkhyRZ3h853Zjk3f2sr/f/3tqv6z8neWmSbw4sW0lemeT7SW5NcnqSDHym/9AfjV2f5IQpjuD+NskN/fu/LsnT++k7JTmp/27ckuScJL8zrI6TrVsjqipf2/ELWA08ox9+EHAm8NGB+YuBtw9ZtoBHDJl3v+WAA/pl5k5S/uHAr4B/BP4Y2G3C/NcCVwOPAgI8FtgL+B1gPfBiYC5wXD++V7/cRcCPgMf08/cA1gB/2Y8/HrgZWDBJnR7Y12lvYGe6o5cbgN37eb8c2M5gO54MfHzCui4CfgA8sl/2IuDUIW13BLB2wrST+7ocBcwB/h64pJ+3E3Ap8GZgl74tVwF/MspnM9XywLeAF/fDuwGHDfs8gZcC35zwHTkf2BN4GLAOWNTPeyVwDbAv8NvAVzbx/XhU/7ntM7Dt3+uHTwQu6dfzAOD9wFlTfed8Tf/lHvqO4bNJbgVuA54JvGsay17W73ltfP3J5lSgqlbRBdl84Bzg5r6vd7e+yMuBN1XVddW5sqpuAZ4NfL+qPlZVG6rqLOB7wHMHVr+4qlZU1QZgEbC6qj7Sl78cOA/4s0nq9EtgGfAU4InAlcDFwOHAYf12b5nG2/xIVf17v95z6I5KpuObVbW0qu4GPkb3Rw3gScC8qjqlqu7q2/IDwLEjrneq5X8NPCLJ3lV1R1VdMs16n1pVt1Z3tHUh977vFwD/XFVrq2o9cOom1nE3XVgvSLJzVa2uqh/0814JvLFfz510f/yeb7/5+NmgO4bnVdVXkswBjgG+lmRBVf3HCMs+oapWTjJ9A91e7aCdgXv61/30QfECgCRPAj4JvBF4PbAf3R7uRPsAP5ww7Yd0fxg2WjMwvD9waP8HbKO5dAE5ma/R7zH3w+uBpwJ39uPTMdiev6Db253J8rv2obU/XRfN4HuaA3xjxPVOtfzLgFOA7yW5HnhrVZ0/g3pvfN/7cN/PZnD4PqpqZZK/pgvrxyS5AHh1Vf24r/9nkgx+r+4GfncaddQI3EPfgVTV3VX1abofw5NnuLof0R3uDjoQWFNVkwb6hLosAz4NHNxPWgP83iRFN/6gBz2MrmvkN6sbGF4DfK2q9hx47VZV/31IVTYG+lP64a/RBfpTGR7oM33E6HSXX0N3/mHwPe1eVUeNuP5NLl9V36+q44CHAKcB5/Z9/TN9nz+h6ybZaL9hBft6fKKqnkz3eVdfl431P3JC/XetqhvGUEcNMNB3IP1VIMfQ9WdeOzBrTpJdB167jLC684BnJ3lWf/JrH+BNwNlDtv3kJK9I8pB+/NHA0XR9owAfBN6W5KC+nn+YZC9gKfDIJH+RZG6SP6c7qTtsD/L8vvyLk+zcv56U5PeHlP9/dP23hwDfqaoV9Hv53HvCbaIbgQOy+VfU3AjslWSPEct/B7i9P2n4wL69D+6Pcoat/+GjLp/kRUnm9X+IN+7F30PXH37PhHVNxznAiUnmJ9kT+NthBZM8KsnTkjyA7lzCL7n3SO99wDuS7N+Xndd/jxlDHTXAQN8xfC7JHcDPgHcAL+mDa6OT6H5AG19fHZh3Ze57Hfo/AfTLH0d38u6ndCfWvg28dUgdbqUL8Kv7unwR+Ayw8TLId9MFwJf6en4IeGDfh/0cussdbwFeBzynqm6ebCNVdTvwLLr+4R/TdQecRtc/O1n5nwOXASuq6q5+8reAH1bVTUPey6f6f29JctmQMkNV1feAs4BV/XmJfaYofzddGzwOuJ7uJO8H6U4AT+ZDdH3Rtyb57AjLLwJW9J/LPwPHVtUvq+oXdN+Xi/t1HTbNt/oBus/zKuByuj/OG5j8UtUH0PWx30z3mT2EriuOvk5LgC8luZ1uJ+DQvm1mWkcNSJVHPJKmluRI4H1VNbELTdsJ99AlTarv3jmq7yqbD7yF7qhM26kpAz3Jh5PclOS7Q+Ynyf9OsjLJVUmeMP5qStoGQtcFt56uy+VaumvhtZ2assslyVOAO+huZjl4kvlHAa+iu6HiULrrVg/dAnWVJG3ClHvoVfV1upNmwxxDF/bVX6e8Z5KHjquCkqTRjOPGovnc94aDtf20n0wsmO45FccDPPjBD37iox/96DFsXpJmj0svvfTmqpo32byteqdoVZ0BnAGwcOHCWr58+dbcvCTt8JJMvPP6N8ZxlcsN3PcOsn25712AkqStYByBvgT4r/3VLocBt1XV/bpbJElb1pRdLknOontWxt7p/peWt9A/1Kmq3kd399hRwEq6B/v85ZaqrCRpuCkDvX/oz6bmF/BXY6uRJGmzeKeoJDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUiJECPcmiJNclWZnkpEnmPyzJhUkuT3JVkqPGX1VJ0qZMGehJ5gCnA0cCC4DjkiyYUOxNwDlV9XjgWOC9466oJGnTRtlDPwRYWVWrquou4GzgmAllCvitfngP4Mfjq6IkaRSjBPp8YM3A+Np+2qCTgRc
lWQssBV412YqSHJ9keZLl69at24zqSpKGGddJ0eOAxVW1L3AU8LEk91t3VZ1RVQurauG8efPGtGlJEowW6DcA+w2M79tPG/Qy4ByAqvoWsCuw9zgqKEkazSiBvgw4KMmBSXahO+m5ZEKZHwFPB0jy+3SBbp+KJG1FUwZ6VW0ATgAuAK6lu5plRZJTkhzdF3sN8IokVwJnAS+tqtpSlZYk3d/cUQpV1VK6k52D0948MHwNcPh4qyZJmg7vFJWkRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUiJECPcmiJNclWZnkpCFlXpDkmiQrknxivNWUJE1l7lQFkswBTgeeCawFliVZUlXXDJQ5CHg9cHhVrU/ykC1VYUnS5EbZQz8EWFlVq6rqLuBs4JgJZV4BnF5V6wGq6qbxVlOSNJVRAn0+sGZgfG0/bdAjgUcmuTjJJUkWTbaiJMcnWZ5k+bp16zavxpKkSY3rpOhc4CDgCOA44ANJ9pxYqKrOqKqFVbVw3rx5Y9q0JAlGC/QbgP0Gxvftpw1aCyypql9X1fXAv9MFvCRpKxkl0JcBByU5MMkuwLHAkgllPku3d06Svem6YFaNsZ6SpClMGehVtQE4AbgAuBY4p6pWJDklydF9sQuAW5JcA1wIvLaqbtlSlZYk3V+qaptseOHChbV8+fJtsm1J2lElubSqFk42zztFJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRowU6EkWJbkuycokJ22i3H9JUkkWjq+KkqRRTBnoSeYApwNHAguA45IsmKTc7sCJwLfHXUlJ0tRG2UM/BFhZVauq6i7gbOCYScq9DTgN+NUY6ydJGtEogT4fWDMwvraf9htJngDsV1Wf39SKkhyfZHmS5evWrZt2ZSVJw834pGiSnYB3A6+ZqmxVnVFVC6tq4bx582a6aUnSgFEC/QZgv4HxfftpG+0OHAxclGQ1cBiwxBOjkrR1jRLoy4CDkhyYZBfgWGDJxplVdVtV7V1VB1TVAcAlwNFVtXyL1FiSNKkpA72qNgAnABcA1wLnVNWKJKckOXpLV1CSNJq5oxSqqqXA0gnT3jyk7BEzr5Ykabq8U1SSGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWrESM9y2d4ccNIm/x+N5q0+9dnbugqStkPuoUtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEaMFOhJFiW5LsnKJCdNMv/VSa5JclWSf0uy//irKknalCkDPckc4HTgSGABcFySBROKXQ4srKo/BM4F3jnuikqSNm2UPfRDgJVVtaqq7gLOBo4ZLFBVF1bVL/rRS4B9x1tNSdJURgn0+cCagfG1/bRhXgZ8YbIZSY5PsjzJ8nXr1o1eS0nSlMZ6UjTJi4CFwLsmm19VZ1TVwqpaOG/evHFuWpJmvbkjlLkB2G9gfN9+2n0keQbwRuCpVXXneKonSRrVKIG+DDgoyYF0QX4s8BeDBZI8Hng/sKiqbhp7LSU15YCTPr+tq7BNrT712VtkvVN2uVTVBuAE4ALgWuCcqlqR5JQkR/fF3gXsBnwqyRVJlmyR2kqShhplD52qWgosnTDtzQPDzxhzvSRJ0+SdopLUCANdkhphoEtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGjHRjkaR7zfbb1mHL3bqumXEPXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWqEgS5JjfA/iZ6FZvt/cux/cKxWuYcuSY0w0CWpEQa6JDXCQJekRhjoktSIkQI9yaIk1yVZmeSkSeY/IMkn+/nfTnLAuCsqSdq0KQM9yRzgdOBIYAFwXJIFE4q9DFhfVY8A/hE4bdwVlSRt2ih76IcAK6tqVVXdBZwNHDOhzDHAmf3wucDTk2R81ZQkTWWUG4vmA2sGxtcChw4rU1UbktwG7AXcPFgoyfHA8f3oHUmu25xKbwf2ZsJ725qy4x//2H4zZxvOzI7cfvsPm7FV7xStqjOAM7bmNreEJMurauG2rseOyvabOdtwZlptv1G6XG4A9hsY37efNmmZJHOBPYBbxlFBSdJoRgn0ZcBBSQ5MsgtwLLBkQpklwEv64ecDX62qGl81JUlTmbLLpe8TPwG4AJgDfLiqViQ5BVheVUuADwEfS7IS+Cld6Ldsh+822sZsv5mzDWemyfaLO9KS1AbvFJWkRhjoktSIWRPoST6c5KYk393WddneTdVWSfZLcmGSa5KsSHLi1q7j9myE9ts1yXeSXNm331u3dh23Z6P+VpPMSXJ5kvO3Vt22d7Mm0IHFwKKZrqS/LLN1i9l0W20AXlNVC4DDgL+a5HEQI2uwTRez6fa7E3haVT0WeBywKMlhm7ux/vEcLVnMaL/VE4FrZ7qxltpv1gR6VX2d7gqcoZL8Xf8Qsm8mOSvJ3/TTL0ryT0mWAycmeW7/ELLLk3wlye/25U5OcmaSbyT5YZI/TfLOJFcn+WKSnftyp/Z7t1cl+V9b+r1P11RtVVU/qarL+uHb6X5U8ycrOxvbdIT2q6q6ox/duX/d7+qEJDsleW+S7yX5cpKlSZ7fz1ud5LQklwF/luQVSZb1e/3nJXlQX25xkv+b5JIkq5Ic0e8BX5tkcV9mTl/uu327/s/xtsj0jPhb3Rd4NvDBTZSZfe1XVbPmBRwAfHfIvCcBVwC7ArsD3wf+pp93EfDegbK/zb1XCL0c+Id++GTgm3Q/0McCvwCO7Od9Bnge3SMRrhtYfs9t3S7TbatJyv0I+C3bdPT2o7sE+ArgDuC0IWWeDyyl2/H6T8B64Pn9vNXA6wbK7jUw/HbgVf3wYrrnL4XumUs/A/6gX+eldEcITwS+PLD8jtB+5/b1PgI43/brXrNmD30EhwP/WlW/qm6v83MT5n9yYHhf4IIkVwOvBR4zMO8LVfVr4Gq6H+0X++lX031JbwN+BXwoyZ/SBdQOKcluwHnAX1fVzyYpYpsOUVV3V9Xj6N73IUkOnqTYk4FPVdU9VfUfwIUT5g
+238H9UczVwAu5b/t9rrqkuRq4saqurqp7gBV07bcKeHiS9yRZRBda260kzwFuqqpLpyg669pv1gZ6uhN7V/SvV46wyM8Hht8D/J+q+gPgv9HtgW50J0D/gf+6/yIA3APMraoNdE+wPBd4DveG03ZrsrbquzrOA/6lqj49rNwUZkWbbqpdqupWuqBZlOTQgXJHj7DqwfZbDJzQt99bmaT96NrrzoHpG9tvPd3Rz0XAK9lEN8a2MEn7HQ4cnWQ13d7z05J83Pbbyg/n2p5U1Rq6wyUAkjwJeH+Sv6drl+cw/G6yPbj3eTYvGVJmUv1e7YOqammSi+n+um/XJmmr0N0dfG1VvXsT5WxTJm2XeXR/mG5N8kDgmXTdLt+eUO4BwEuSnAnMo+te+MSQzewO/KT/Q/tC7v+8paGS7A3cVVXnpXsC6sen8/62tInt13s9QJIj6LrxXtRPn9XtN2sCPclZdB/o3knWAm+pqg9tnF9Vy5IsAa4CbqQ7vLptyOpOBj6VZD3wVeDAaVRld+Bfk+xK1y/36mm+lS1uqrai20N6MXB1kiv6aW+oqqWD65mtbTpC+z0UODPd1RU7AedU1WSX3p0HPB24hu7x1JcxvP3+Dvg2sK7/d/dpVHk+8JEkG4/YXz+NZcduhPYb1axrP2/9H5Bkt6q6oz/D/XXg+Oqv5tDmsU1nZqD99gK+Axze9wdrBLOt/WbNHvqIzkh3PfWuwJkGz1jYpjNzfpI9gV2At7UcRlvIrGo/99AlqRGz9ioXSWqNgS5JjTDQJakRBrokNcJAl6RG/H+DonqsxNINWwAAAABJRU5ErkJggg==\n",
765
+ "text/plain": [
766
+ "<Figure size 432x288 with 1 Axes>"
767
+ ]
768
+ },
769
+ "metadata": {},
770
+ "output_type": "display_data"
771
+ }
772
+ ],
773
+ "source": [
774
+ "from nltk.translate.bleu_score import corpus_bleu\n",
775
+ "import matplotlib.pyplot as plt\n",
776
+ "\n",
777
+ "plt.bar(x = bleu_dic.keys(), height = bleu_dic.values())\n",
778
+ "plt.title(\"BLEU Score with the testing set\")\n",
779
+ "plt.ylim((0,1))\n",
780
+ "plt.show()"
781
+ ]
782
+ },
783
+ {
784
+ "cell_type": "code",
785
+ "execution_count": null,
786
+ "metadata": {
787
+ "colab": {
788
+ "base_uri": "https://localhost:8080/"
789
+ },
790
+ "id": "ThXu38YNo6LQ",
791
+ "outputId": "540b3456-f792-4b1e-d605-c7d320bf5729"
792
+ },
793
+ "outputs": [
794
+ {
795
+ "name": "stdout",
796
+ "output_type": "stream",
797
+ "text": [
798
+ "Mounted at /content/drive/\n"
799
+ ]
800
+ }
801
+ ],
802
+ "source": [
803
+ "from google.colab import drive\n",
804
+ "\n",
805
+ "drive.mount(\"/content/drive/\")\n",
806
+ "\n",
807
+ "#files.download(\"/content/model4_final_reuse_2.h5\")\n"
808
+ ]
809
+ },
810
+ {
811
+ "cell_type": "code",
812
+ "execution_count": null,
813
+ "metadata": {
814
+ "colab": {
815
+ "base_uri": "https://localhost:8080/",
816
+ "height": 425
817
+ },
818
+ "id": "6WX179UgqihS",
819
+ "outputId": "7ba02150-d3d8-4bbc-8186-015ad7c1e92e"
820
+ },
821
+ "outputs": [
822
+ {
823
+ "ename": "ValueError",
824
+ "evalue": "ignored",
825
+ "output_type": "error",
826
+ "traceback": [
827
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
828
+ "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
829
+ "\u001b[0;32m<ipython-input-15-38f909bf98ba>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0.5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSimpleRNN\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlatent_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mactivation\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'relu'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'rnn_decoder2'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSimpleRNN\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlatent_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mactivation\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'relu'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'rnn_decoder3'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0mtarget_next_step\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTimeDistributed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDense\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvocab_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mactivation\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"softmax\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
830
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/layers/recurrent.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs, initial_state, constants, **kwargs)\u001b[0m\n\u001b[1;32m 677\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 678\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0minitial_state\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mconstants\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 679\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mRNN\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 680\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 681\u001b[0m \u001b[0;31m# If any of `initial_state` or `constants` are specified and are Keras\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
831
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint: disable=broad-except\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 67\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwith_traceback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfiltered_tb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 68\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0;32mdel\u001b[0m \u001b[0mfiltered_tb\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
832
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py\u001b[0m in \u001b[0;36massert_input_compatibility\u001b[0;34m(input_spec, inputs, layer_name)\u001b[0m\n\u001b[1;32m 212\u001b[0m \u001b[0mndim\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrank\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 213\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mndim\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mspec\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 214\u001b[0;31m raise ValueError(f'Input {input_index} of layer \"{layer_name}\" '\n\u001b[0m\u001b[1;32m 215\u001b[0m \u001b[0;34m'is incompatible with the layer: '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 216\u001b[0m \u001b[0;34mf'expected ndim={spec.ndim}, found ndim={ndim}. '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
833
+ "\u001b[0;31mValueError\u001b[0m: Input 0 of layer \"rnn_decoder3\" is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: (None, 1024)"
834
+ ]
835
+ }
836
+ ],
837
+ "source": [
838
+ "source = keras.Input(shape=(None,), dtype=\"int64\", name=\"source\")\n",
839
+ "\n",
840
+ "# x = layers.Embedding(vocab_size, embed_dim, mask_zero=True)(source)\n",
841
+ "x = layers.Embedding(vocab_size, embed_dim, weights=[embedding_matrix], mask_zero=True,\n",
842
+ " name='embed_encoder', trainable=False)(source)\n",
843
+ "\n",
844
+ "encoded_source = layers.SimpleRNN(latent_dim, activation='relu', name='rnn_encoder4')(x)\n",
845
+ "\n",
846
+ "past_target = keras.Input(shape=(None,), dtype=\"int64\", name=\"target\")\n",
847
+ "x = layers.Embedding(vocab_size, embed_dim, mask_zero=True, name='embed_decoder')(past_target)\n",
848
+ "\n",
849
+ "decoder_gru = layers.SimpleRNN(latent_dim, return_sequences=True, activation='relu', name='rnn_decoder1')\n",
850
+ "x = decoder_gru(x, initial_state=encoded_source)\n",
851
+ "x = layers.Dropout(0.5)(x)\n",
852
+ "x = layers.SimpleRNN(latent_dim, activation='relu', name='rnn_decoder2')(x)\n",
853
+ "x = layers.SimpleRNN(latent_dim, return_sequences=True, activation='relu', name='rnn_decoder3')(x)\n",
854
+ "\n",
855
+ "target_next_step = layers.TimeDistributed(layers.Dense(vocab_size, activation=\"softmax\", name='output'))(x)\n",
856
+ "\n",
857
+ "seq2seq_rnn = keras.Model([source, past_target], target_next_step)\n",
858
+ "\n",
859
+ "seq2seq_rnn.compile(\n",
860
+ " optimizer=\"rmsprop\",\n",
861
+ " loss=\"sparse_categorical_crossentropy\",\n",
862
+ " metrics=[\"accuracy\"])"
863
+ ]
864
+ },
865
+ {
866
+ "cell_type": "code",
867
+ "execution_count": null,
868
+ "metadata": {
869
+ "colab": {
870
+ "base_uri": "https://localhost:8080/",
871
+ "height": 222
872
+ },
873
+ "id": "NvlZdKQ2qk6a",
874
+ "outputId": "c6dd1258-df31-4b5a-e79d-91e2d83f21af"
875
+ },
876
+ "outputs": [
877
+ {
878
+ "ename": "NameError",
879
+ "evalue": "ignored",
880
+ "output_type": "error",
881
+ "traceback": [
882
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
883
+ "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
884
+ "\u001b[0;32m<ipython-input-1-26deb6ad3f93>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;31m# mode='min')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0mseq2seq_rnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_ds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalidation_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mval_ds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcheckpoint\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0;31m# seq2seq_rnn.save('model4_final_ME_2.h5')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
885
+ "\u001b[0;31mNameError\u001b[0m: name 'seq2seq_rnn' is not defined"
886
+ ]
887
+ }
888
+ ],
889
+ "source": [
890
+ "vocab_size = 30000\n",
891
+ "sequence_length = 20\n",
892
+ "\n",
893
+ "# checkpoint = ModelCheckpoint(filepath='model4_ME_2.h5',\n",
894
+ "# monitor='val_loss', verbose=1, save_best_only=True,\n",
895
+ "# mode='min')\n",
896
+ "\n",
897
+ "seq2seq_rnn.fit(train_ds, epochs=3, validation_data=val_ds, callbacks=[checkpoint])\n",
898
+ "# seq2seq_rnn.save('model4_final_ME_2.h5')\n"
899
+ ]
900
+ },
901
+ {
902
+ "cell_type": "markdown",
903
+ "metadata": {
904
+ "id": "BRc0Z_ELHpdD"
905
+ },
906
+ "source": [
907
+ "### **The Codes after this are from prior experiment.**"
908
+ ]
909
+ },
910
+ {
911
+ "cell_type": "code",
912
+ "execution_count": null,
913
+ "metadata": {
914
+ "id": "stJYZc1eh2Kw"
915
+ },
916
+ "outputs": [],
917
+ "source": [
918
+ "def tokenize(x):\n",
919
+ " \"\"\"\n",
920
+ " Tokenize x\n",
921
+ " :param x: List of sentences/strings to be tokenized\n",
922
+ " :return: Tuple of (tokenized x data, tokenizer used to tokenize x)\n",
923
+ " \"\"\"\n",
924
+ " # TODO: Implement\n",
925
+ " tokenizer = Tokenizer()\n",
926
+ " tokenizer.fit_on_texts(x)\n",
927
+ " return tokenizer.texts_to_sequences(x), tokenizer"
928
+ ]
929
+ },
930
+ {
931
+ "cell_type": "code",
932
+ "execution_count": null,
933
+ "metadata": {
934
+ "id": "CVZgVHkzh3tr"
935
+ },
936
+ "outputs": [],
937
+ "source": [
938
+ "def pad(x, length=None):\n",
939
+ " \"\"\"\n",
940
+ " Pad x\n",
941
+ " :param x: List of sequences.\n",
942
+ " :param length: Length to pad the sequence to. If None, use length of longest sequence in x.\n",
943
+ " :return: Padded numpy array of sequences\n",
944
+ " \"\"\"\n",
945
+ "# # TODO: Implement\n",
946
+ "# if length is None:\n",
947
+ "# length=max([len(sentence) for sentence in x])\n",
948
+ "# print(length)\n",
949
+ " \n",
950
+ " return pad_sequences(x, maxlen=20, padding ='post')"
951
+ ]
952
+ },
953
+ {
954
+ "cell_type": "code",
955
+ "execution_count": null,
956
+ "metadata": {
957
+ "id": "msE26JGmh5TA"
958
+ },
959
+ "outputs": [],
960
+ "source": [
961
+ "import collections\n",
962
+ "\n",
963
+ "english_words_counter = collections.Counter([word for sentence in english_sentences for word in sentence.split()])\n",
964
+ "french_words_counter = collections.Counter([word for sentence in french_sentences for word in sentence.split()])\n",
965
+ "\n",
966
+ "print('{} English words.'.format(len([word for sentence in english_sentences for word in sentence.split()])))\n",
967
+ "print('{} unique English words.'.format(len(english_words_counter)))\n",
968
+ "print('10 Most common words in the English dataset:')\n",
969
+ "print('\"' + '\" \"'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '\"')\n",
970
+ "print()\n",
971
+ "print('{} French words.'.format(len([word for sentence in french_sentences for word in sentence.split()])))\n",
972
+ "print('{} unique French words.'.format(len(french_words_counter)))\n",
973
+ "print('10 Most common words in the French dataset:')\n",
974
+ "print('\"' + '\" \"'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '\"')"
975
+ ]
976
+ },
977
+ {
978
+ "cell_type": "code",
979
+ "execution_count": null,
980
+ "metadata": {
981
+ "id": "GHOxz_1Fh7Ha"
982
+ },
983
+ "outputs": [],
984
+ "source": [
985
+ "for sample_i in range(5):\n",
986
+ " print('English sample {}: {}'.format(sample_i + 1, english_sentences[sample_i+10000]))\n",
987
+ " print('French sample {}: {}\\n'.format(sample_i + 1, french_sentences[sample_i+10000]))\n",
988
+ " print('German sample {}: {}\\n'.format(sample_i + 1, german_sentences[sample_i+10000]))\n",
989
+ " print('Italian sample {}: {}\\n'.format(sample_i + 1, italian_sentences[sample_i+10000]))\n"
990
+ ]
991
+ },
992
+ {
993
+ "cell_type": "code",
994
+ "execution_count": null,
995
+ "metadata": {
996
+ "id": "ogGPGCf7h9Gw"
997
+ },
998
+ "outputs": [],
999
+ "source": [
1000
+ "def preprocess(x, y1, y2, y3):\n",
1001
+ " \"\"\"\n",
1002
+ " Preprocess x and y\n",
1003
+ " :param x: Feature List of sentences\n",
1004
+ " :param y: Label List of sentences\n",
1005
+ " :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)\n",
1006
+ " \"\"\"\n",
1007
+ " preprocess_en, en_tk = tokenize(x)\n",
1008
+ " preprocess_fr, fr_tk = tokenize(y1)\n",
1009
+ " preprocess_de, de_tk = tokenize(y2)\n",
1010
+ " preprocess_it, it_tk = tokenize(y3)\n",
1011
+ " \n",
1012
+ " preprocess_en = pad(preprocess_en)\n",
1013
+ " preprocess_fr = pad(preprocess_fr)\n",
1014
+ " preprocess_de = pad(preprocess_de)\n",
1015
+ " preprocess_it = pad(preprocess_it)\n",
1016
+ "\n",
1017
+ " \n",
1018
+ " # Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions\n",
1019
+ " preprocess_fr = preprocess_fr.reshape(*preprocess_fr.shape, 1)\n",
1020
+ " preprocess_de = preprocess_de.reshape(*preprocess_de.shape, 1)\n",
1021
+ " preprocess_it = preprocess_it.reshape(*preprocess_it.shape, 1)\n",
1022
+ "\n",
1023
+ " return preprocess_en,preprocess_fr,preprocess_de,preprocess_it, en_tk, fr_tk, de_tk, it_tk\n",
1024
+ "\n",
1025
+ "inputTimestep = 30\n",
1026
+ "outputTimestep = 30\n",
1027
+ "\n",
1028
+ "preproc_english_sentences,preproc_french_sentences, preproc_german_sentences, preproc_italian_sentences, en_tokenizer,fr_tokenizer, de_tokenizer, it_tokenizer =\\\n",
1029
+ " preprocess(english_sentences, french_sentences,german_sentences,italian_sentences )\n",
1030
+ "\n",
1031
+ "\n",
1032
+ "max_english_sequence_length = preproc_english_sentences.shape[1]\n",
1033
+ "max_french_sequence_length = preproc_french_sentences.shape[1]\n",
1034
+ "max_german_sequence_length = preproc_german_sentences.shape[1]\n",
1035
+ "max_italian_sequence_length = preproc_italian_sentences.shape[1]\n",
1036
+ "\n",
1037
+ "english_vocab_size = len(en_tokenizer.word_index)\n",
1038
+ "french_vocab_size = len(fr_tokenizer.word_index)\n",
1039
+ "german_vocab_size = len(de_tokenizer.word_index)\n",
1040
+ "italian_vocab_size = len(it_tokenizer.word_index)\n",
1041
+ "\n",
1042
+ "print('Data Preprocessed')\n",
1043
+ "\n",
1044
+ "print(\"Max English sentence length:\", max_english_sequence_length)\n",
1045
+ "print(\"Max French sentence length:\", max_french_sequence_length)\n",
1046
+ "print(\"Max German sentence length:\", max_german_sequence_length)\n",
1047
+ "print(\"Max Italian sentence length:\", max_italian_sequence_length)\n",
1048
+ "\n",
1049
+ "print(\"English vocabulary size:\", english_vocab_size)\n",
1050
+ "print(\"French vocabulary size:\", french_vocab_size)\n",
1051
+ "print(\"German vocabulary size:\", german_vocab_size)\n",
1052
+ "print(\"Italian vocabulary size:\", italian_vocab_size)"
1053
+ ]
1054
+ },
1055
+ {
1056
+ "cell_type": "code",
1057
+ "execution_count": null,
1058
+ "metadata": {
1059
+ "id": "zdLpnbLHiEk3"
1060
+ },
1061
+ "outputs": [],
1062
+ "source": [
1063
+ "from keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional, Dropout, LSTM\n",
1064
+ "from keras.losses import sparse_categorical_crossentropy\n",
1065
+ "from keras.models import Sequential\n",
1066
+ "from keras.layers import Dense, Activation, TimeDistributed, RepeatVector, Flatten, Conv2D, Embedding\n",
1067
+ "from keras.layers.recurrent import SimpleRNN, LSTM\n",
1068
+ "from keras.utils import np_utils\n",
1069
+ "from tensorflow.keras.models import Model\n",
1070
+ "import keras\n",
1071
+ "\n",
1072
+ "\n",
1073
+ "def many_many_tangled(input_shape, fr_output_sequence_length, english_vocab_size, french_vocab_size):\n",
1074
+ "\n",
1075
+ "\n",
1076
+ " # Hyperparameters\n",
1077
+ " opt = tf.keras.optimizers.Adam(learning_rate=1e-3)\n",
1078
+ " \n",
1079
+ " # Build the layers \n",
1080
+ " model = Sequential()\n",
1081
+ " # Embedding\n",
1082
+ " model.add(Embedding(english_vocab_size, 256, input_length=input_shape[1],\n",
1083
+ " input_shape=input_shape[1:]))\n",
1084
+ " # Encoder\n",
1085
+ " model.add(SimpleRNN(256))\n",
1086
+ " model.add(RepeatVector(fr_output_sequence_length))\n",
1087
+ " # Decoder\n",
1088
+ " model.add(SimpleRNN(256, return_sequences=True))\n",
1089
+ " model.add(TimeDistributed(Dense(512, activation='relu')))\n",
1090
+ " model.add(Dropout(0.5))\n",
1091
+ " model.add(TimeDistributed(Dense((french_vocab_size), activation='softmax')))\n",
1092
+ " model.compile(loss=sparse_categorical_crossentropy,\n",
1093
+ " optimizer=opt,\n",
1094
+ " metrics=['accuracy'])\n",
1095
+ " \n",
1096
+ " print(model.summary())\n",
1097
+ "\n",
1098
+ " return model\n",
1099
+ "\n",
1100
+ "def many_many_functional(input_shape, output_sequence_length, english_vocab_size, french_vocab_size, german_vocab_size,italian_vocab_size):\n",
1101
+ " \n",
1102
+ " #input\n",
1103
+ " eng_input = Input(shape=(None,), dtype=\"int64\", name=\"english\")\n",
1104
+ "\n",
1105
+ " #embedding\n",
1106
+ " embedding_layer = Embedding(english_vocab_size, 256)(eng_input)\n",
1107
+ "\n",
1108
+ " rnn_layer_1 = SimpleRNN(256)(embedding_layer)\n",
1109
+ "\n",
1110
+ " fr_input = Input(shape=(None,), dtype=\"int64\", name=\"spanish\")\n",
1111
+ "\n",
1112
+ " embedding_layer = Embedding(french_vocab_size, 256)(fr_input)\n",
1113
+ "\n",
1114
+ " #Encoder for two langauges\n",
1115
+ " rnn_layer_1 = SimpleRNN(256)(embedding_layer)\n",
1116
+ " repeat_vector = RepeatVector(output_sequence_length)(rnn_layer_1)\n",
1117
+ "\n",
1118
+ " #Common decoder for all languages\n",
1119
+ " rnn_layer2 = SimpleRNN(256, return_sequences=True)(repeat_vector)\n",
1120
+ " time_distributed_1 = Dense(1024, activation='relu')(rnn_layer2)\n",
1121
+ " dropout_1 = Dropout(0.5)(time_distributed_1)\n",
1122
+ " \n",
1123
+ " output_fr = Dense(french_vocab_size, activation='softmax')(dropout_1)\n",
1124
+ " #output_de = TimeDistributed(Dense(german_vocab_size, activation='softmax'))(dropout_1)\n",
1125
+ " #output_it = TimeDistributed(Dense(italian_vocab_size, activation='softmax'))(dropout_1)\n",
1126
+ " \n",
1127
+ " #Create model\n",
1128
+ " #model = Model(inputs=eng_input, outputs=[output_fr,output_de,output_it])\n",
1129
+ " model = Model(inputs=[eng_input, fr_input], outputs=output_fr)\n",
1130
+ "\n",
1131
+ "\n",
1132
+ " model.compile(loss=sparse_categorical_crossentropy, optimizer='adam',metrics=['accuracy'])\n",
1133
+ "\n",
1134
+ " print(model.summary())\n",
1135
+ " \n",
1136
+ " return model\n",
1137
+ "\n",
1138
+ "def embed_model(output_sequence_length, english_vocab_size, french_vocab_size):\n",
1139
+ "\n",
1140
+ " # Hyperparameters\n",
1141
+ " opt = tf.keras.optimizers.Adam(learning_rate=1e-3)\n",
1142
+ " # Build the layers \n",
1143
+ " model = Sequential()\n",
1144
+ " # Embedding\n",
1145
+ " model.add(Embedding(english_vocab_size, 256))\n",
1146
+ " # Encoder\n",
1147
+ " model.add(SimpleRNN(256))\n",
1148
+ " model.add(RepeatVector(output_sequence_length))\n",
1149
+ " # Decoder\n",
1150
+ " model.add(SimpleRNN(256, return_sequences=True))\n",
1151
+ " model.add(TimeDistributed(Dense(516, activation='relu')))\n",
1152
+ " model.add(Dropout(0.5))\n",
1153
+ " model.add(TimeDistributed(Dense(516, activation='relu')))\n",
1154
+ " model.add(Dropout(0.5))\n",
1155
+ " model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax')))\n",
1156
+ " model.compile(loss=sparse_categorical_crossentropy,\n",
1157
+ " optimizer=opt,\n",
1158
+ " metrics=['accuracy'])\n",
1159
+ " return model\n",
1160
+ "\n"
1161
+ ]
1162
+ },
1163
+ {
1164
+ "cell_type": "code",
1165
+ "execution_count": null,
1166
+ "metadata": {
1167
+ "id": "VdDy5uL1t8Sy"
1168
+ },
1169
+ "outputs": [],
1170
+ "source": []
1171
+ },
1172
+ {
1173
+ "cell_type": "code",
1174
+ "execution_count": null,
1175
+ "metadata": {
1176
+ "id": "CeQ4Z6j1spTP"
1177
+ },
1178
+ "outputs": [],
1179
+ "source": []
1180
+ },
1181
+ {
1182
+ "cell_type": "code",
1183
+ "execution_count": null,
1184
+ "metadata": {
1185
+ "id": "Qr25F675iIyX"
1186
+ },
1187
+ "outputs": [],
1188
+ "source": [
1189
+ "# tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])\n",
1190
+ "# tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2]))\n",
1191
+ "\n",
1192
+ "reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,\n",
1193
+ " patience=5, min_lr=0.001)\n",
1194
+ "\n",
1195
+ "callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)\n",
1196
+ "# TODO: Train the neural network\n",
1197
+ "\n",
1198
+ "many_many = many_many_functional(\n",
1199
+ " None,\n",
1200
+ " 20,\n",
1201
+ " en_vocab_size+1,\n",
1202
+ " fr_vocab_size+1,\n",
1203
+ " None, None)\n",
1204
+ "\n",
1205
+ "many_many.summary()\n",
1206
+ "\n",
1207
+ "many_many.fit(train_ds, validation_data=val_ds, batch_size=64, epochs=30, callbacks=[callback, reduce_lr])"
1208
+ ]
1209
+ },
1210
+ {
1211
+ "cell_type": "code",
1212
+ "execution_count": null,
1213
+ "metadata": {
1214
+ "id": "Zt2YCfXosq05"
1215
+ },
1216
+ "outputs": [],
1217
+ "source": [
1218
+ "def logits_to_text(logits, tokenizer):\n",
1219
+ " \"\"\"\n",
1220
+ " Turn logits from a neural network into text using the tokenizer\n",
1221
+ " :param logits: Logits from a neural network\n",
1222
+ " :param tokenizer: Keras Tokenizer fit on the labels\n",
1223
+ " :return: String that represents the text of the logits\n",
1224
+ " \"\"\"\n",
1225
+ " index_to_words = {id: word for word, id in tokenizer.word_index.items()}\n",
1226
+ " index_to_words[0] = '<PAD>'\n",
1227
+ "\n",
1228
+ " return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])\n",
1229
+ "\n",
1230
+ "print('`logits_to_text` function loaded.')"
1231
+ ]
1232
+ },
1233
+ {
1234
+ "cell_type": "code",
1235
+ "execution_count": null,
1236
+ "metadata": {
1237
+ "id": "AS88MAqtstIC"
1238
+ },
1239
+ "outputs": [],
1240
+ "source": [
1241
+ "# Print prediction(s)\n",
1242
+ "print(\"Prediction:\")\n",
1243
+ "\n",
1244
+ "print(logits_to_text(many_many.predict(tmp_x[6:7])[0], fr_tokenizer))\n",
1245
+ "\n",
1246
+ "print(\"\\nCorrect Translation French:\")\n",
1247
+ "print(french_sentences[6:7])\n",
1248
+ "\n",
1249
+ "print(\"\\nOriginal text:\")\n",
1250
+ "print()"
1251
+ ]
1252
+ }
1253
+ ],
1254
+ "metadata": {
1255
+ "accelerator": "GPU",
1256
+ "colab": {
1257
+ "machine_shape": "hm",
1258
+ "provenance": []
1259
+ },
1260
+ "gpuClass": "standard",
1261
+ "kernelspec": {
1262
+ "display_name": "Python 3",
1263
+ "name": "python3"
1264
+ },
1265
+ "language_info": {
1266
+ "name": "python"
1267
+ }
1268
+ },
1269
+ "nbformat": 4,
1270
+ "nbformat_minor": 0
1271
+ }
language_models/lstm/.DS_Store ADDED
Binary file (6.15 kB). View file
 
language_models/lstm/motivating example models/.DS_Store ADDED
Binary file (6.15 kB). View file
 
language_models/lstm/motivating example models/problem1/original_problem1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85158ef279de626273555fd37d4035e9125ba3e486205e353c69378aa0a88574
3
+ size 801261536
language_models/lstm/motivating example models/problem1/solution1_problem1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6e0016869178720578a6fb4c3964426ca121c5919d0ba9e820d57f58aa37bca
3
+ size 801261536
language_models/lstm/motivating example models/problem1/solution2_problem1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ccd208ba23a1f099fe940d0a9e94141acf41b5017c85510a66df5d005932b970
3
+ size 801261536
language_models/lstm/motivating example models/problem2/original_problem2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d9104b7d00bdb138f183dccefc022243ad0475c4d28680a619f44b31b333356
3
+ size 801261536
language_models/lstm/motivating example models/problem2/solution1_problem2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3a1a3265df7eace0ca51e249a28526b3497e407f91e8a122ba38c48c4516c8a
3
+ size 504461536
language_models/lstm/motivating example models/problem2/solution2_problem2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31d7d5e98dddccb4ce2c57388e2aff13981dc63afe94b6757d797761d69f74c9
3
+ size 801261536
language_models/lstm/reuse models/model4_de_fr.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed88701982c60ffc1b66c7fafd749e88a4abbd3cf887267ec61fecd9344da293
3
+ size 801261536
language_models/lstm/reuse models/model4_de_it.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1fff9de19afa7e8ebc88bcc1815084c6c68180c92027942ce2d53ac5856552a
3
+ size 801261560
language_models/lstm/reuse models/model4_fr_it.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09992211a9e0a2e3a92f6a901c732beff9606472b43cc6f9dcb8deafcdbaa6ee
3
+ size 801261536
language_models/lstm/rq1 models/model_LSTM_1layer.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e37bf0e31b2a0de88b361c8863a5a7fd636c5e1971c0f9a84519fe407dd090d
3
+ size 398337168
language_models/lstm/rq1 models/model_LSTM_2layer.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee8fe0fe89df20dac87f6d9c42277ed86cc2805832147f0f7b82de8ba2d84f2e
3
+ size 532645160
language_models/lstm/rq1 models/model_LSTM_3layer.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c73a7407b3e824f07571ccbb94ec1f47ef3923c55312b31158a873c308cf959
3
+ size 666952624
language_models/lstm/rq1 models/model_LSTM_4layer.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b67e5a14e3a14cfcde75627f69d54a1efec08a848f4a799af0783ac56265e02f
3
+ size 801261560
language_models/lstm/training script/(LSTM)_NMT_Experiment.ipynb ADDED
@@ -0,0 +1,1317 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "colab": {
8
+ "base_uri": "https://localhost:8080/"
9
+ },
10
+ "id": "SIK13ZpDhF_n",
11
+ "outputId": "3349e413-0325-4873-8022-5cf975e03643"
12
+ },
13
+ "outputs": [
14
+ {
15
+ "output_type": "stream",
16
+ "name": "stdout",
17
+ "text": [
18
+ "Mounted at /content/drive\n"
19
+ ]
20
+ }
21
+ ],
22
+ "source": [
23
+ "#connect drive\n",
24
+ "from google.colab import drive\n",
25
+ "drive.mount('/content/drive', force_remount=True)"
26
+ ]
27
+ },
28
+ {
29
+ "cell_type": "code",
30
+ "execution_count": null,
31
+ "metadata": {
32
+ "id": "CB3hzllVhKVC"
33
+ },
34
+ "outputs": [],
35
+ "source": [
36
+ "import csv\n",
37
+ "import os\n",
38
+ "import numpy as np\n",
39
+ "from keras.preprocessing.text import Tokenizer\n",
40
+ "import pandas as pd\n",
41
+ "import tensorflow as tf\n",
42
+ "from keras.utils.np_utils import to_categorical\n",
43
+ "from keras_preprocessing.sequence import pad_sequences\n",
44
+ "import tensorflow_datasets as tfds\n",
45
+ "from nltk.translate.bleu_score import corpus_bleu\n",
46
+ "import os\n",
47
+ "from keras.callbacks import ReduceLROnPlateau\n",
48
+ "from keras.callbacks import ModelCheckpoint\n"
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "code",
53
+ "execution_count": null,
54
+ "metadata": {
55
+ "id": "OdmG4c70hxQs"
56
+ },
57
+ "outputs": [],
58
+ "source": [
59
+ "def load_data(path):\n",
60
+ " \"\"\"\n",
61
+ " Load dataset\n",
62
+ " \"\"\"\n",
63
+ " input_file = os.path.join(path)\n",
64
+ " with open(input_file, \"r\") as f:\n",
65
+ " data = f.read()\n",
66
+ "\n",
67
+ " return data.split('\\n')\n",
68
+ "\n",
69
+ "def load_glob_embedding(num_words, embed_size=100, word_index=None):\n",
70
+ " from numpy import asarray\n",
71
+ " from numpy import zeros\n",
72
+ "\n",
73
+ " embeddings_dictionary = dict()\n",
74
+ " glove_file = open('/content/drive/MyDrive/NMT/glove.6B.'+str(embed_size)+'d.txt', encoding=\"utf8\")\n",
75
+ "\n",
76
+ " for line in glove_file:\n",
77
+ " records = line.split()\n",
78
+ " word = records[0]\n",
79
+ " vector_dimensions = asarray(records[1:], dtype='float32')\n",
80
+ " embeddings_dictionary[word] = vector_dimensions\n",
81
+ " glove_file.close()\n",
82
+ "\n",
83
+ " embedding_matrix = zeros((num_words, embed_size))\n",
84
+ " for index, word in enumerate(word_index):\n",
85
+ " embedding_vector = embeddings_dictionary.get(word)\n",
86
+ " if embedding_vector is not None:\n",
87
+ " embedding_matrix[index] = embedding_vector\n",
88
+ "\n",
89
+ " return embedding_matrix\n",
90
+ "\n"
91
+ ]
92
+ },
93
+ {
94
+ "cell_type": "code",
95
+ "execution_count": null,
96
+ "metadata": {
97
+ "id": "oTEQAFkOK8B4"
98
+ },
99
+ "outputs": [],
100
+ "source": [
101
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/en_train.txt')\n",
102
+ "french_sentences = load_data('/content/drive/MyDrive/NMT/fr_train.txt')\n",
103
+ "german_sentences = load_data('/content/drive/MyDrive/NMT/de_train.txt')\n",
104
+ "italian_sentences = load_data('/content/drive/MyDrive/NMT/it_train.txt')"
105
+ ]
106
+ },
107
+ {
108
+ "cell_type": "markdown",
109
+ "metadata": {
110
+ "id": "jNH6q4fk63Uc"
111
+ },
112
+ "source": [
113
+ "# Run only for the original model, not for inter-reuse experiment"
114
+ ]
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "execution_count": null,
119
+ "metadata": {
120
+ "id": "b4BL1OgOiVFF"
121
+ },
122
+ "outputs": [],
123
+ "source": [
124
+ "text_pairs = []\n",
125
+ "for english,french,german,italian in zip(english_sentences, french_sentences,german_sentences,italian_sentences):\n",
126
+ " # english = \"[starten] \" + english + \" [enden]\"\n",
127
+ " french = \"[startfr] \" + french + \" [endfr]\"\n",
128
+ " german = \"[startde] \" + german + \" [endde]\"\n",
129
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
130
+ "\n",
131
+ " text_pairs.append((english, french))\n",
132
+ " text_pairs.append((english, german))\n",
133
+ " text_pairs.append((english, italian))\n",
134
+ "\n",
135
+ " # text_pairs.append((french, english))\n",
136
+ " # text_pairs.append((french, german))\n",
137
+ " # text_pairs.append((french, italian))\n",
138
+ "\n",
139
+ " # text_pairs.append((german, english))\n",
140
+ " # text_pairs.append((german, french))\n",
141
+ " # text_pairs.append((german, italian))\n",
142
+ "\n",
143
+ " # text_pairs.append((italian, english))\n",
144
+ " # text_pairs.append((italian, french))\n",
145
+ " # text_pairs.append((italian, german))\n"
146
+ ]
147
+ },
148
+ {
149
+ "cell_type": "markdown",
150
+ "metadata": {
151
+ "id": "fXgeKJYJ2F6r"
152
+ },
153
+ "source": [
154
+ "\n",
155
+ "\n",
156
+ "# InterReuse (1st scenario) En to {de, it}"
157
+ ]
158
+ },
159
+ {
160
+ "cell_type": "code",
161
+ "execution_count": null,
162
+ "metadata": {
163
+ "id": "3UsmQC_159vK"
164
+ },
165
+ "outputs": [],
166
+ "source": [
167
+ "text_pairs = []\n",
168
+ "for english,french,german,italian in zip(english_sentences, french_sentences,german_sentences,italian_sentences):\n",
169
+ " german = \"[startde] \" + german + \" [endde]\"\n",
170
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
171
+ "\n",
172
+ " text_pairs.append((english, german))\n",
173
+ " text_pairs.append((english, italian))\n",
174
+ " "
175
+ ]
176
+ },
177
+ {
178
+ "cell_type": "markdown",
179
+ "metadata": {
180
+ "id": "rKyB-Jtn2GRW"
181
+ },
182
+ "source": [
183
+ "# InterReuse (2nd scenario) En to {de, fr}"
184
+ ]
185
+ },
186
+ {
187
+ "cell_type": "code",
188
+ "execution_count": null,
189
+ "metadata": {
190
+ "id": "4NME4f-u6ee_"
191
+ },
192
+ "outputs": [],
193
+ "source": [
194
+ "text_pairs = []\n",
195
+ "for english,french,german,italian in zip(english_sentences, french_sentences,german_sentences,italian_sentences):\n",
196
+ " french = \"[startfr] \" + french + \" [endfr]\"\n",
197
+ " german = \"[startde] \" + german + \" [endde]\"\n",
198
+ " \n",
199
+ " text_pairs.append((english, french))\n",
200
+ " text_pairs.append((english, german))\n"
201
+ ]
202
+ },
203
+ {
204
+ "cell_type": "markdown",
205
+ "metadata": {
206
+ "id": "eGBLp-242GUM"
207
+ },
208
+ "source": [
209
+ "# InterReuse (3rd scenario) En to {it, fr}"
210
+ ]
211
+ },
212
+ {
213
+ "cell_type": "code",
214
+ "execution_count": null,
215
+ "metadata": {
216
+ "id": "kiES-xcO6rEf"
217
+ },
218
+ "outputs": [],
219
+ "source": [
220
+ "text_pairs = []\n",
221
+ "for english,french,german,italian in zip(english_sentences, french_sentences,german_sentences,italian_sentences):\n",
222
+ " french = \"[startfr] \" + french + \" [endfr]\"\n",
223
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
224
+ "\n",
225
+ " text_pairs.append((english, french))\n",
226
+ " text_pairs.append((english, italian))\n",
227
+ " "
228
+ ]
229
+ },
230
+ {
231
+ "cell_type": "markdown",
232
+ "metadata": {
233
+ "id": "-bP39N4viCAV"
234
+ },
235
+ "source": [
236
+ "# Motivating Example 1 Original Model"
237
+ ]
238
+ },
239
+ {
240
+ "cell_type": "code",
241
+ "execution_count": null,
242
+ "metadata": {
243
+ "id": "VcIvoR2LiCoN"
244
+ },
245
+ "outputs": [],
246
+ "source": [
247
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/en_ua_train_original.txt')\n",
248
+ "french_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/fr_ua_train_original.txt')\n",
249
+ "german_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/de_ua_train_original.txt')\n",
250
+ "ukranian_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/ua_ua_train_original.txt')\n",
251
+ "italian_sentences = load_data('/content/drive/MyDrive/NMT/MotivatingExample 1 (Original Model+dataset)/it_ua_train_original.txt')\n",
252
+ "\n",
253
+ "text_pairs = []\n",
254
+ "for english,french,german,italian,ukranian in zip(english_sentences, french_sentences,german_sentences,italian_sentences,ukranian_sentences):\n",
255
+ " french = \"[startfr] \" + french + \" [endfr]\"\n",
256
+ " german = \"[startde] \" + german + \" [endde]\"\n",
257
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
258
+ " ukranian = \"[startua] \" + ukranian + \" [endua]\"\n",
259
+ "\n",
260
+ " text_pairs.append((english, french))\n",
261
+ " text_pairs.append((english, german))\n",
262
+ " text_pairs.append((english, italian))\n",
263
+ " text_pairs.append((english, ukranian))"
264
+ ]
265
+ },
266
+ {
267
+ "cell_type": "markdown",
268
+ "metadata": {
269
+ "id": "xFmSR42wE1GY"
270
+ },
271
+ "source": [
272
+ "# Motivating Example 1 Solution 1"
273
+ ]
274
+ },
275
+ {
276
+ "cell_type": "code",
277
+ "execution_count": null,
278
+ "metadata": {
279
+ "id": "ENunZM3FE23o"
280
+ },
281
+ "outputs": [],
282
+ "source": [
283
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/en_ua_train.txt')\n",
284
+ "german_sentences = load_data('/content/drive/MyDrive/NMT/de_ua_train.txt')\n",
285
+ "ukranian_sentences = load_data('/content/drive/MyDrive/NMT/ua_train.txt')\n",
286
+ "\n",
287
+ "text_pairs = []\n",
288
+ "for english, german, ukranian in zip(english_sentences,german_sentences, ukranian_sentences):\n",
289
+ " german = \"[startde] \" + german + \" [endde]\"\n",
290
+ " ukranian = \"[startua] \" + ukranian + \" [endua]\"\n",
291
+ "\n",
292
+ " text_pairs.append((english, german))\n",
293
+ " text_pairs.append((english, ukranian))"
294
+ ]
295
+ },
296
+ {
297
+ "cell_type": "markdown",
298
+ "metadata": {
299
+ "id": "IUnNMpAVDpZG"
300
+ },
301
+ "source": [
302
+ "# Motivating Example 1 Solution 2"
303
+ ]
304
+ },
305
+ {
306
+ "cell_type": "code",
307
+ "execution_count": null,
308
+ "metadata": {
309
+ "id": "T5-vlFmeDrtQ"
310
+ },
311
+ "outputs": [],
312
+ "source": [
313
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/en_ua_train.txt')\n",
314
+ "ukranian_sentences = load_data('/content/drive/MyDrive/NMT/ua_train.txt')\n",
315
+ "\n",
316
+ "text_pairs = []\n",
317
+ "for english,ukranian in zip(english_sentences, ukranian_sentences):\n",
318
+ " ukranian = \"[startua] \" + ukranian + \" [endua]\"\n",
319
+ "\n",
320
+ " text_pairs.append((english, ukranian))"
321
+ ]
322
+ },
323
+ {
324
+ "cell_type": "markdown",
325
+ "metadata": {
326
+ "id": "VtPspyGHKpYj"
327
+ },
328
+ "source": [
329
+ "# Motivating Example 2 Original\n",
330
+ "\n"
331
+ ]
332
+ },
333
+ {
334
+ "cell_type": "code",
335
+ "execution_count": null,
336
+ "metadata": {
337
+ "id": "BkgEgbc6Kteo"
338
+ },
339
+ "outputs": [],
340
+ "source": [
341
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/en_et_train.txt')\n",
342
+ "german_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/de_et_train.txt')\n",
343
+ "italian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/it_et_train.txt')\n",
344
+ "estonian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/et_et_train.txt')\n",
345
+ "text_pairs = []\n",
346
+ "for english, german, italian, estonian in zip(english_sentences,german_sentences, italian_sentences,estonian_sentences):\n",
347
+ " english = \"[starten] \" + english + \" [enden]\"\n",
348
+ " german = \"[startde] \" + german + \" [endde]\"\n",
349
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
350
+ "\n",
351
+ " text_pairs.append((estonian, english))\n",
352
+ " text_pairs.append((estonian, german))\n",
353
+ " text_pairs.append((estonian, italian))"
354
+ ]
355
+ },
356
+ {
357
+ "cell_type": "markdown",
358
+ "metadata": {
359
+ "id": "bUyLIVUHn0Xu"
360
+ },
361
+ "source": [
362
+ "# Motivating Example 2 Solution 1\n"
363
+ ]
364
+ },
365
+ {
366
+ "cell_type": "code",
367
+ "execution_count": null,
368
+ "metadata": {
369
+ "id": "9-0JKeqWn4YW"
370
+ },
371
+ "outputs": [],
372
+ "source": [
373
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/en_et_train.txt')\n",
374
+ "italian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/it_et_train.txt')\n",
375
+ "estonian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/et_et_train.txt')\n",
376
+ "\n",
377
+ "text_pairs = []\n",
378
+ "for english, italian, estonian in zip(english_sentences, italian_sentences,estonian_sentences):\n",
379
+ " english = \"[starten] \" + english + \" [enden]\"\n",
380
+ " italian = \"[startit] \" + italian + \" [endit]\"\n",
381
+ "\n",
382
+ " text_pairs.append((estonian, english))\n",
383
+ " text_pairs.append((estonian, italian))"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "markdown",
388
+ "metadata": {
389
+ "id": "MI7Ti6Vnkf3v"
390
+ },
391
+ "source": [
392
+ "# Motivating Example 2 Solution 2\n"
393
+ ]
394
+ },
395
+ {
396
+ "cell_type": "code",
397
+ "execution_count": null,
398
+ "metadata": {
399
+ "id": "rxssKJSUkkJP"
400
+ },
401
+ "outputs": [],
402
+ "source": [
403
+ "english_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/s2/en_et_train_s2.txt')\n",
404
+ "estonian_sentences = load_data('/content/drive/MyDrive/NMT/Dataset_ME2/s2/et_et_train_s2.txt')\n",
405
+ "text_pairs = []\n",
406
+ "for english, estonian in zip(english_sentences,estonian_sentences):\n",
407
+ " english = \"[starten] \" + english + \" [enden]\"\n",
408
+ "\n",
409
+ " text_pairs.append((estonian, english))"
410
+ ]
411
+ },
412
+ {
413
+ "cell_type": "markdown",
414
+ "metadata": {
415
+ "id": "w8044H00hfK-"
416
+ },
417
+ "source": [
418
+ "# Building the model"
419
+ ]
420
+ },
421
+ {
422
+ "cell_type": "code",
423
+ "execution_count": null,
424
+ "metadata": {
425
+ "colab": {
426
+ "base_uri": "https://localhost:8080/"
427
+ },
428
+ "id": "mrG_3rC8h01E",
429
+ "outputId": "4e2ea5f3-444b-4386-d326-2c26d849f1cb"
430
+ },
431
+ "outputs": [
432
+ {
433
+ "output_type": "stream",
434
+ "name": "stdout",
435
+ "text": [
436
+ "('Do you like peanut butter?', '[startde] Magst du Erdnussbutter? [endde]')\n"
437
+ ]
438
+ }
439
+ ],
440
+ "source": [
441
+ "import random\n",
442
+ "print(random.choice(text_pairs))"
443
+ ]
444
+ },
445
+ {
446
+ "cell_type": "code",
447
+ "execution_count": null,
448
+ "metadata": {
449
+ "id": "i4bKiSxSLKMI"
450
+ },
451
+ "outputs": [],
452
+ "source": [
453
+ "import random\n",
454
+ "random.shuffle(text_pairs)\n",
455
+ "num_val_samples = int(0.15 * len(text_pairs))\n",
456
+ "num_train_samples = len(text_pairs) - 2 * num_val_samples\n",
457
+ "train_pairs = text_pairs[:num_train_samples]\n",
458
+ "val_pairs = text_pairs[num_train_samples:num_train_samples + num_val_samples]\n",
459
+ "test_pairs = text_pairs[num_train_samples + num_val_samples:]"
460
+ ]
461
+ },
462
+ {
463
+ "cell_type": "code",
464
+ "execution_count": null,
465
+ "metadata": {
466
+ "id": "CB-X3EfULMec"
467
+ },
468
+ "outputs": [],
469
+ "source": [
470
+ "import tensorflow as tf\n",
471
+ "import string\n",
472
+ "import re\n",
473
+ "from tensorflow.keras import layers\n",
474
+ "\n",
475
+ "strip_chars = string.punctuation + \"¿\"\n",
476
+ "strip_chars = strip_chars.replace(\"[\", \"\")\n",
477
+ "strip_chars = strip_chars.replace(\"]\", \"\")\n",
478
+ "\n",
479
+ "def custom_standardization(input_string):\n",
480
+ " lowercase = tf.strings.lower(input_string)\n",
481
+ " return tf.strings.regex_replace(\n",
482
+ " lowercase, f\"[{re.escape(strip_chars)}]\", \"\")\n",
483
+ "\n",
484
+ "vocab_size = 30000\n",
485
+ "\n",
486
+ "#change vocab size for ME2\n",
487
+ "# vocab_size = 2000\n",
488
+ "\n",
489
+ "# en_vocab_size = 11000\n",
490
+ "# fr_vocab_size = 18000\n",
491
+ "sequence_length = 20\n",
492
+ "\n",
493
+ "source_vectorization = layers.TextVectorization(\n",
494
+ " max_tokens=vocab_size,\n",
495
+ " output_mode=\"int\",\n",
496
+ " output_sequence_length=sequence_length,\n",
497
+ ")\n",
498
+ "target_vectorization = layers.TextVectorization(\n",
499
+ " max_tokens=vocab_size,\n",
500
+ " output_mode=\"int\",\n",
501
+ " output_sequence_length=sequence_length + 1,\n",
502
+ " standardize=custom_standardization,\n",
503
+ ")\n",
504
+ "train_source_texts = [pair[0] for pair in train_pairs]\n",
505
+ "train_target_texts = [pair[1] for pair in train_pairs]\n",
506
+ "source_vectorization.adapt(train_source_texts)\n",
507
+ "target_vectorization.adapt(train_target_texts)\n"
508
+ ]
509
+ },
510
+ {
511
+ "cell_type": "code",
512
+ "execution_count": null,
513
+ "metadata": {
514
+ "id": "IhrYWMyHLPfr"
515
+ },
516
+ "outputs": [],
517
+ "source": [
518
+ "batch_size = 64\n",
519
+ "\n",
520
+ "def format_dataset(eng, spa):\n",
521
+ " eng = source_vectorization(eng)\n",
522
+ " spa = target_vectorization(spa)\n",
523
+ " return ({\n",
524
+ " \"source\": eng,\n",
525
+ " \"target\": spa[:, :-1],\n",
526
+ " }, spa[:, 1:])\n",
527
+ "\n",
528
+ "def make_dataset(pairs):\n",
529
+ " eng_texts, spa_texts = zip(*pairs)\n",
530
+ " eng_texts = list(eng_texts)\n",
531
+ " spa_texts = list(spa_texts)\n",
532
+ " dataset = tf.data.Dataset.from_tensor_slices((eng_texts, spa_texts))\n",
533
+ " dataset = dataset.batch(batch_size)\n",
534
+ " dataset = dataset.map(format_dataset, num_parallel_calls=4)\n",
535
+ " return dataset.shuffle(2048).prefetch(16).cache()\n",
536
+ "\n",
537
+ "train_ds = make_dataset(train_pairs)\n",
538
+ "val_ds = make_dataset(val_pairs)"
539
+ ]
540
+ },
541
+ {
542
+ "cell_type": "code",
543
+ "execution_count": null,
544
+ "metadata": {
545
+ "colab": {
546
+ "base_uri": "https://localhost:8080/"
547
+ },
548
+ "id": "tFFNByKXLRtV",
549
+ "outputId": "c11030c5-1d46-45b0-e2f1-ed45f27eb087"
550
+ },
551
+ "outputs": [
552
+ {
553
+ "output_type": "stream",
554
+ "name": "stdout",
555
+ "text": [
556
+ "inputs['source'].shape: (64, 20)\n",
557
+ "inputs['target'].shape: (64, 20)\n",
558
+ "targets.shape: (64, 20)\n"
559
+ ]
560
+ }
561
+ ],
562
+ "source": [
563
+ "for inputs, targets in train_ds.take(1):\n",
564
+ " print(f\"inputs['source'].shape: {inputs['source'].shape}\")\n",
565
+ " print(f\"inputs['target'].shape: {inputs['target'].shape}\")\n",
566
+ " print(f\"targets.shape: {targets.shape}\")"
567
+ ]
568
+ },
569
+ {
570
+ "cell_type": "code",
571
+ "execution_count": null,
572
+ "metadata": {
573
+ "id": "kawQLBPHLTmr"
574
+ },
575
+ "outputs": [],
576
+ "source": [
577
+ "\n",
578
+ "from tensorflow import keras\n",
579
+ "from tensorflow.keras import layers\n",
580
+ "\n",
581
+ "embed_dim = 200\n",
582
+ "latent_dim = 1024\n",
583
+ "\n",
584
+ "embedding_matrix = load_glob_embedding(vocab_size, 200, target_vectorization.get_vocabulary())\n"
585
+ ]
586
+ },
587
+ {
588
+ "cell_type": "code",
589
+ "execution_count": null,
590
+ "metadata": {
591
+ "id": "HUxTSb5ELV_S",
592
+ "colab": {
593
+ "base_uri": "https://localhost:8080/"
594
+ },
595
+ "outputId": "a601a9f6-14f9-4634-de7d-e27846c88dc8"
596
+ },
597
+ "outputs": [
598
+ {
599
+ "output_type": "stream",
600
+ "name": "stdout",
601
+ "text": [
602
+ "Model: \"model_1\"\n",
603
+ "__________________________________________________________________________________________________\n",
604
+ " Layer (type) Output Shape Param # Connected to \n",
605
+ "==================================================================================================\n",
606
+ " source (InputLayer) [(None, None)] 0 [] \n",
607
+ " \n",
608
+ " embed_encoder (Embedding) (None, None, 200) 6000000 ['source[0][0]'] \n",
609
+ " \n",
610
+ " rnn_encoder1 (LSTM) (None, None, 1024) 5017600 ['embed_encoder[0][0]'] \n",
611
+ " \n",
612
+ " rnn_encoder2 (LSTM) (None, None, 1024) 8392704 ['rnn_encoder1[0][0]'] \n",
613
+ " \n",
614
+ " target (InputLayer) [(None, None)] 0 [] \n",
615
+ " \n",
616
+ " rnn_encoder3 (LSTM) (None, None, 1024) 8392704 ['rnn_encoder2[0][0]'] \n",
617
+ " \n",
618
+ " embed_decoder (Embedding) (None, None, 200) 6000000 ['target[0][0]'] \n",
619
+ " \n",
620
+ " rnn_encoder4 (LSTM) [(None, 1024), 8392704 ['rnn_encoder3[0][0]'] \n",
621
+ " (None, 1024), \n",
622
+ " (None, 1024)] \n",
623
+ " \n",
624
+ " rnn_decoder1 (LSTM) (None, None, 1024) 5017600 ['embed_decoder[0][0]', \n",
625
+ " 'rnn_encoder4[0][1]', \n",
626
+ " 'rnn_encoder4[0][2]'] \n",
627
+ " \n",
628
+ " rnn_decoder2 (LSTM) (None, None, 1024) 8392704 ['rnn_decoder1[0][0]'] \n",
629
+ " \n",
630
+ " rnn_decoder3 (LSTM) (None, None, 1024) 8392704 ['rnn_decoder2[0][0]'] \n",
631
+ " \n",
632
+ " rnn_decoder4 (LSTM) (None, None, 1024) 8392704 ['rnn_decoder3[0][0]'] \n",
633
+ " \n",
634
+ " dropout_1 (Dropout) (None, None, 1024) 0 ['rnn_decoder4[0][0]'] \n",
635
+ " \n",
636
+ " time_distributed_1 (TimeDistri (None, None, 30000) 30750000 ['dropout_1[0][0]'] \n",
637
+ " buted) \n",
638
+ " \n",
639
+ "==================================================================================================\n",
640
+ "Total params: 103,141,424\n",
641
+ "Trainable params: 97,141,424\n",
642
+ "Non-trainable params: 6,000,000\n",
643
+ "__________________________________________________________________________________________________\n"
644
+ ]
645
+ }
646
+ ],
647
+ "source": [
648
+ "source = keras.Input(shape=(None,), dtype=\"int64\", name=\"source\")\n",
649
+ "\n",
650
+ "# x = layers.Embedding(vocab_size, embed_dim, mask_zero=True)(source)\n",
651
+ "x = layers.Embedding(vocab_size, embed_dim, weights=[embedding_matrix], mask_zero=True,\n",
652
+ " name='embed_encoder', trainable=False)(source)\n",
653
+ "\n",
654
+ "encoded_source = layers.LSTM(latent_dim, return_sequences=True, activation='tanh', name='rnn_encoder1')(x)\n",
655
+ "encoded_source = layers.LSTM(latent_dim, return_sequences=True, activation='tanh', name='rnn_encoder2')(encoded_source)\n",
656
+ "encoded_source = layers.LSTM(latent_dim, return_sequences=True, activation='tanh', name='rnn_encoder3')(encoded_source)\n",
657
+ "encoded_source, state_h, state_c = layers.LSTM(latent_dim, return_state=True, activation='tanh', name='rnn_encoder4')(encoded_source)\n",
658
+ "\n",
659
+ "encoder_states = [state_h, state_c]\n",
660
+ "\n",
661
+ "past_target = keras.Input(shape=(None,), dtype=\"int64\", name=\"target\")\n",
662
+ "x = layers.Embedding(vocab_size, embed_dim, mask_zero=True, name='embed_decoder')(past_target)\n",
663
+ "\n",
664
+ "decoder_gru = layers.LSTM(latent_dim, return_sequences=True, activation='tanh', name='rnn_decoder1')\n",
665
+ "x = decoder_gru(x, initial_state=encoder_states)\n",
666
+ "x = layers.LSTM(latent_dim, return_sequences=True, activation='tanh', name='rnn_decoder2')(x)\n",
667
+ "x = layers.LSTM(latent_dim, return_sequences=True, activation='tanh', name='rnn_decoder3')(x)\n",
668
+ "x = layers.LSTM(latent_dim, return_sequences=True, activation='tanh', name='rnn_decoder4')(x)\n",
669
+ "\n",
670
+ "x = layers.Dropout(0.5)(x)\n",
671
+ "\n",
672
+ "target_next_step = layers.TimeDistributed(layers.Dense(vocab_size, activation=\"softmax\", name='output'))(x)\n",
673
+ "\n",
674
+ "seq2seq_rnn = keras.Model([source, past_target], target_next_step)\n",
675
+ "\n",
676
+ "seq2seq_rnn.compile(\n",
677
+ " optimizer=\"rmsprop\",\n",
678
+ " loss=\"sparse_categorical_crossentropy\",\n",
679
+ " metrics=[\"accuracy\"])\n",
680
+ "\n",
681
+ "seq2seq_rnn.summary()"
682
+ ]
683
+ },
684
+ {
685
+ "cell_type": "code",
686
+ "execution_count": null,
687
+ "metadata": {
688
+ "colab": {
689
+ "base_uri": "https://localhost:8080/"
690
+ },
691
+ "id": "twWGTTxHLX0-",
692
+ "outputId": "565dea86-8562-4561-bb1b-ba936d14f7d9"
693
+ },
694
+ "outputs": [
695
+ {
696
+ "output_type": "stream",
697
+ "name": "stdout",
698
+ "text": [
699
+ "Epoch 1/3\n",
700
+ "26/26 [==============================] - 38s 436ms/step - loss: 1.9507 - accuracy: 0.1266 - val_loss: 1.5340 - val_accuracy: 0.1645\n",
701
+ "Epoch 2/3\n",
702
+ "26/26 [==============================] - 2s 88ms/step - loss: 1.6646 - accuracy: 0.1742 - val_loss: 1.4814 - val_accuracy: 0.1751\n",
703
+ "Epoch 3/3\n",
704
+ "26/26 [==============================] - 2s 88ms/step - loss: 1.6099 - accuracy: 0.1869 - val_loss: 1.4740 - val_accuracy: 0.1777\n"
705
+ ]
706
+ }
707
+ ],
708
+ "source": [
709
+ "# vocab_size = 30000\n",
710
+ "# sequence_length = 20\n",
711
+ "\n",
712
+ "# checkpoint = ModelCheckpoint(filepath='model_1LSTM_original_chkpt.h5',\n",
713
+ "# monitor='val_loss', verbose=1, save_best_only=True,\n",
714
+ "# mode='min')\n",
715
+ "\n",
716
+ "seq2seq_rnn.fit(train_ds, epochs=3, validation_data=val_ds)\n",
717
+ "seq2seq_rnn.save('solution1_problem2.h5')"
718
+ ]
719
+ },
720
+ {
721
+ "cell_type": "code",
722
+ "execution_count": null,
723
+ "metadata": {
724
+ "colab": {
725
+ "background_save": true
726
+ },
727
+ "id": "0SkImFO6os8J",
728
+ "outputId": "c2d98cb7-90f6-484e-d3c3-7b2f094abdf7"
729
+ },
730
+ "outputs": [
731
+ {
732
+ "ename": "KeyboardInterrupt",
733
+ "evalue": "ignored",
734
+ "output_type": "error",
735
+ "traceback": [
736
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
737
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
738
+ "\u001b[0;32m<ipython-input-31-9a163dae5324>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 33\u001b[0m \u001b[0;31m# print(\"predicted: \", decode_sequence(test_eng_texts[i],acts[0], acts[-1]))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 34\u001b[0m \u001b[0mactual\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0macts\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 35\u001b[0;31m \u001b[0mpredicted\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdecode_sequence\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtest_eng_texts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0macts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0macts\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 36\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[0;31m#print(\"actual: \", actual, '\\n', \"predicted:\", predicted)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
739
+ "\u001b[0;32m<ipython-input-31-9a163dae5324>\u001b[0m in \u001b[0;36mdecode_sequence\u001b[0;34m(input_sentence, decoded_sentence, end_tag)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0mtokenized_target_sentence\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtarget_vectorization\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mdecoded_sentence\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 12\u001b[0m next_token_predictions = seq2seq_rnn.predict(\n\u001b[0;32m---> 13\u001b[0;31m [tokenized_input_sentence, tokenized_target_sentence])\n\u001b[0m\u001b[1;32m 14\u001b[0m \u001b[0msampled_token_index\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnext_token_predictions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0msampled_token\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mspa_index_lookup\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0msampled_token_index\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
740
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 63\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 64\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 65\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint: disable=broad-except\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
741
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/engine/training.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[1;32m 1976\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mon_predict_begin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1977\u001b[0m \u001b[0mbatch_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1978\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0miterator\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menumerate_epochs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# Single epoch.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1979\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcatch_stop_iteration\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1980\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mstep\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msteps\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
742
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/engine/data_adapter.py\u001b[0m in \u001b[0;36menumerate_epochs\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1189\u001b[0m \u001b[0;34m\"\"\"Yields `(epoch, tf.data.Iterator)`.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1190\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_truncate_execution_to_epoch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1191\u001b[0;31m \u001b[0mdata_iterator\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0miter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1192\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mepoch\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_initial_epoch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_epochs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1193\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_insufficient_data\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# Set by `catch_stop_iteration`.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
743
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/dataset_ops.py\u001b[0m in \u001b[0;36m__iter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 484\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecuting_eagerly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minside_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 485\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolocate_with\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_variant_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 486\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0miterator_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOwnedIterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 487\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 488\u001b[0m raise RuntimeError(\"`tf.data.Dataset` only supports Python-style \"\n",
744
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/iterator_ops.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, dataset, components, element_spec)\u001b[0m\n\u001b[1;32m 753\u001b[0m \u001b[0;34m\"When `dataset` is provided, `element_spec` and `components` must \"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 754\u001b[0m \"not be specified.\")\n\u001b[0;32m--> 755\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_create_iterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 756\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 757\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_next_call_count\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
745
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/data/ops/iterator_ops.py\u001b[0m in \u001b[0;36m_create_iterator\u001b[0;34m(self, dataset)\u001b[0m\n\u001b[1;32m 785\u001b[0m \u001b[0moutput_types\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_flat_output_types\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 786\u001b[0m output_shapes=self._flat_output_shapes))\n\u001b[0;32m--> 787\u001b[0;31m \u001b[0mgen_dataset_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmake_iterator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mds_variant\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_iterator_resource\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 788\u001b[0m \u001b[0;31m# Delete the resource when this object is deleted\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 789\u001b[0m self._resource_deleter = IteratorResourceDeleter(\n",
746
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/gen_dataset_ops.py\u001b[0m in \u001b[0;36mmake_iterator\u001b[0;34m(dataset, iterator, name)\u001b[0m\n\u001b[1;32m 3314\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3315\u001b[0m _result = pywrap_tfe.TFE_Py_FastPathExecute(\n\u001b[0;32m-> 3316\u001b[0;31m _ctx, \"MakeIterator\", name, dataset, iterator)\n\u001b[0m\u001b[1;32m 3317\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3318\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0m_core\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
747
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
748
+ ]
749
+ }
750
+ ],
751
+ "source": [
752
+ "import numpy as np\n",
753
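+ "# index -> word lookup for the target vocabulary (despite the spa_ naming, the target texts in this notebook are French)\n",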
+ "spa_vocab = target_vectorization.get_vocabulary()\n",
754
+ "spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))\n",
755
+ "max_decoded_sentence_length = 20\n",
756
+ "\n",
757
+ "def decode_sequence(input_sentence, decoded_sentence, end_tag):\n",
758
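+ "    # Greedy decoding: at each step, feed the source and the partial target, take the argmax token,\n",
+ "    # append it to the running sentence, and stop once the supplied end_tag is produced\n",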
+ " tokenized_input_sentence = source_vectorization([input_sentence])\n",
759
+ " #decoded_sentence = \"[start]\"\n",
760
+ "\n",
761
+ " for i in range(max_decoded_sentence_length):\n",
762
+ " tokenized_target_sentence = target_vectorization([decoded_sentence])\n",
763
+ " next_token_predictions = seq2seq_rnn.predict(\n",
764
+ " [tokenized_input_sentence, tokenized_target_sentence])\n",
765
+ " sampled_token_index = np.argmax(next_token_predictions[0, i, :])\n",
766
+ " sampled_token = spa_index_lookup[sampled_token_index]\n",
767
+ " decoded_sentence += \" \" + sampled_token\n",
768
+ " if sampled_token == end_tag:\n",
769
+ " break\n",
770
+ " return decoded_sentence\n",
771
+ "\n",
772
+ "bleu_dic = {}\n",
773
+ "test_eng_texts = [pair[0] for pair in test_pairs]\n",
774
+ "test_fr_texts = [pair[1] for pair in test_pairs]\n",
775
+ "actual, predicted = [], []\n",
776
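+ "# Seed each decode with the first word of the reference and use its last word as the stop tag,\n",
+ "# then collect reference/hypothesis token lists for corpus-level BLEU\n",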
+ "for i in range(len(test_pairs)):\n",
777
+ " # input_sentence = \n",
778
+ " # target_sentence = random.choice(test_fr_texts)\n",
779
+ " acts=test_fr_texts[i].split()\n",
780
+ " # print(\"-\")\n",
781
+ " # print(acts[0], acts[-1])\n",
782
+ " # print(\"source: \", test_eng_texts[i])\n",
783
+ " # print(\"actual target: \", test_fr_texts[i])\n",
784
+ " # print(\"predicted: \", decode_sequence(test_eng_texts[i],acts[0], acts[-1]))\n",
785
+ " actual.append([acts])\n",
786
+ " predicted.append(decode_sequence(test_eng_texts[i],acts[0], acts[-1]).split())\n",
787
+ "\n",
788
+ "#print(\"actual: \", actual, '\\n', \"predicted:\", predicted)\n",
789
+ "bleu_dic['1-grams'] = corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0))\n",
790
+ "bleu_dic['1-2-grams'] = corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0))\n",
791
+ "bleu_dic['1-3-grams'] = corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0))\n",
792
+ "bleu_dic['1-4-grams'] = corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25))\n",
793
+ "print(bleu_dic)"
794
+ ]
795
+ },
796
+ {
797
+ "cell_type": "code",
798
+ "execution_count": null,
799
+ "metadata": {
800
+ "colab": {
801
+ "base_uri": "https://localhost:8080/",
802
+ "height": 281
803
+ },
804
+ "id": "D_Qf1ZnRNual",
805
+ "outputId": "ef5196ee-1377-4041-cc41-1c0fdd71a911"
806
+ },
807
+ "outputs": [
808
+ {
809
+ "data": {
810
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEICAYAAABPgw/pAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAWk0lEQVR4nO3de9RddX3n8feHBESFQguxIwEBK2ojrbcITHEp9daACq6OtVB1tEtlnFUcOlotXqqIugo6te04OIq3oFYQQW3EKGoFL4xowt2A1BiiCVoIGBC8gIHv/LF35PDwnDznyXNy+z3v11pnZV9+e+/f+Z1zPs/ev31JqgpJ0o5vp21dAUnSeBjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNA1KyRZkeSITcy/KMnLZ7D+k5N8fHOX31qSvDDJl7Z1PbRlGOjbuSSrk/wyyR1J1if5fJL9BuYvTvL2IctWkp/3y258vW7YckkO6JeZO2R9xyS5IsnPktyc5KtJDhzn+91SquoxVXURzDx8kxyRZO3YKnf/9Q/9TKe5nvt9nlX1L1X1rJmue1ym+s5pegz0HcNzq2o34KHAjcB7prHsY6tqt4HXOzenAkkeAXwUeA2wB3AgcDpw9+asb8g2ksTvpLSZ/PHsQKrqV8C5wIJtsPnHAddX1b9V5/aqOq+qfgSQZE6SNyT5QZLbk1y68UgiyR8lWZbktv7fP9q40r6r4x1JLgZ+ATw8yaOTfDnJT5Ncl+QFk1UoyR8nuXpg/MtJlg2MfyPJ8/rh1UmekWQR8Abgz/sjlisHVrl/kov7+n8pyd6TbPPBwBeAfQaOevbpZ++S5KP98iuSLBxYbp8k5yVZl+T6JP9jyHs6Hngh8Lp+3Z+bavkkhyRZ3h853Zjk3f2sr/f/3tqv6z8neWmSbw4sW0lemeT7SW5NcnqSDHym/9AfjV2f5IQpjuD+NskN/fu/LsnT++k7JTmp/27ckuScJL8zrI6TrVsjqipf2/ELWA08ox9+EHAm8NGB+YuBtw9ZtoBHDJl3v+WAA/pl5k5S/uHAr4B/BP4Y2G3C/NcCVwOPAgI8FtgL+B1gPfBiYC5wXD++V7/cRcCPgMf08/cA1gB/2Y8/HrgZWDBJnR7Y12lvYGe6o5cbgN37eb8c2M5gO54MfHzCui4CfgA8sl/2IuDUIW13BLB2wrST+7ocBcwB/h64pJ+3E3Ap8GZgl74tVwF/MspnM9XywLeAF/fDuwGHDfs8gZcC35zwHTkf2BN4GLAOWNTPeyVwDbAv8NvAVzbx/XhU/7ntM7Dt3+uHTwQu6dfzAOD9wFlTfed8Tf/lHvqO4bNJbgVuA54JvGsay17W73ltfP3J5lSgqlbRBdl84Bzg5r6vd7e+yMuBN1XVddW5sqpuAZ4NfL+qPlZVG6rqLOB7wHMHVr+4qlZU1QZgEbC6qj7Sl78cOA/4s0nq9EtgGfAU4InAlcDFwOHAYf12b5nG2/xIVf17v95z6I5KpuObVbW0qu4GPkb3Rw3gScC8qjqlqu7q2/IDwLEjrneq5X8NPCLJ3lV1R1VdMs16n1pVt1Z3tHUh977vFwD/XFVrq2o9cOom1nE3XVgvSLJzVa2uqh/0814JvLFfz510f/yeb7/5+NmgO4bnVdVXkswBjgG+lmRBVf3HCMs+oapWTjJ9A91e7aCdgXv61/30QfECgCRPAj4JvBF4PbAf3R7uRPsAP5ww7Yd0fxg2WjMwvD9waP8HbKO5dAE5ma/R7zH3w+uBpwJ39uPTMdiev6Db253J8rv2obU/XRfN4HuaA3xjxPVOtfzLgFOA7yW5HnhrVZ0/g3pvfN/7cN/PZnD4PqpqZZK/pgvrxyS5AHh1Vf24r/9nkgx+r+4GfncaddQI3EPfgVTV3VX1abofw5NnuLof0R3uDjoQWFNVkwb6hLosAz4NHNxPWgP83iRFN/6gBz2MrmvkN6sbGF4DfK2q9hx47VZV/31IVTYG+lP64a/RBfpTGR7oM33E6HSXX0N3/mHwPe1eVUeNuP5NLl9V36+q44CHAKcB5/Z9/TN9nz+h6ybZaL9hBft6fKKqnkz3eVdfl431P3JC/XetqhvGUEcNMNB3IP1VIMfQ9WdeOzBrTpJdB167jLC684BnJ3lWf/JrH+BNwNlDtv3kJK9I8pB+/NHA0XR9owAfBN6W5KC+nn+YZC9gKfDIJH+RZG6SP6c7qTtsD/L8vvyLk+zcv56U5PeHlP9/dP23hwDfqaoV9Hv53HvCbaIbgQOy+VfU3AjslWSPEct/B7i9P2n4wL69D+6Pcoat/+GjLp/kRUnm9X+IN+7F30PXH37PhHVNxznAiUnmJ9kT+NthBZM8KsnTkjyA7lzCL7n3SO99wDuS7N+Xndd/jxlDHTXAQN8xfC7JHcDPgHcAL+mDa6OT6H5AG19fHZh3Ze57Hfo/AfTLH0d38u6ndCfWvg28dUgdbqUL8Kv7unwR+Ayw8TLId9MFwJf6en4IeGDfh/0cussdbwFeBzynqm6ebCNVdTvwLLr+4R/TdQecRtc/O1n5nwOXASuq6q5+8reAH1bVTUPey6f6f29JctmQMkNV1feAs4BV/XmJfaYofzddGzwOuJ7uJO8H6U4AT+ZDdH3Rtyb57AjLLwJW9J/LPwPHVtUvq+oXdN+Xi/t1HTbNt/oBus/zKuByuj/OG5j8UtUH0PWx30z3mT2EriuOvk5LgC8luZ1uJ+DQvm1mWkcNSJVHPJKmluRI4H1VNbELTdsJ99AlTarv3jmq7yqbD7yF7qhM26kpAz3Jh5PclOS7Q+Ynyf9OsjLJVUmeMP5qStoGQtcFt56uy+VaumvhtZ2assslyVOAO+huZjl4kvlHAa+iu6HiULrrVg/dAnWVJG3ClHvoVfV1upNmwxxDF/bVX6e8Z5KHjquCkqTRjOPGovnc94aDtf20n0wsmO45FccDPPjBD37iox/96DFsXpJmj0svvfTmqpo32byteqdoVZ0BnAGwcOHCWr58+dbcvCTt8JJMvPP6N8ZxlcsN3PcOsn25712AkqStYByBvgT4r/3VLocBt1XV/bpbJElb1pRdLknOontWxt7p/peWt9A/1Kmq3kd399hRwEq6B/v85ZaqrCRpuCkDvX/oz6bmF/BXY6uRJGmzeKeoJDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUiJECPcmiJNclWZnkpEnmPyzJhUkuT3JVkqPGX1VJ0qZMGehJ5gCnA0cCC4DjkiyYUOxNwDlV9XjgWOC9466oJGnTRtlDPwRYWVWrquou4GzgmAllCvitfngP4Mfjq6IkaRSjBPp8YM3A+Np+2qCTgRc
lWQssBV412YqSHJ9keZLl69at24zqSpKGGddJ0eOAxVW1L3AU8LEk91t3VZ1RVQurauG8efPGtGlJEowW6DcA+w2M79tPG/Qy4ByAqvoWsCuw9zgqKEkazSiBvgw4KMmBSXahO+m5ZEKZHwFPB0jy+3SBbp+KJG1FUwZ6VW0ATgAuAK6lu5plRZJTkhzdF3sN8IokVwJnAS+tqtpSlZYk3d/cUQpV1VK6k52D0948MHwNcPh4qyZJmg7vFJWkRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUiJECPcmiJNclWZnkpCFlXpDkmiQrknxivNWUJE1l7lQFkswBTgeeCawFliVZUlXXDJQ5CHg9cHhVrU/ykC1VYUnS5EbZQz8EWFlVq6rqLuBs4JgJZV4BnF5V6wGq6qbxVlOSNJVRAn0+sGZgfG0/bdAjgUcmuTjJJUkWTbaiJMcnWZ5k+bp16zavxpKkSY3rpOhc4CDgCOA44ANJ9pxYqKrOqKqFVbVw3rx5Y9q0JAlGC/QbgP0Gxvftpw1aCyypql9X1fXAv9MFvCRpKxkl0JcBByU5MMkuwLHAkgllPku3d06Svem6YFaNsZ6SpClMGehVtQE4AbgAuBY4p6pWJDklydF9sQuAW5JcA1wIvLaqbtlSlZYk3V+qaptseOHChbV8+fJtsm1J2lElubSqFk42zztFJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRowU6EkWJbkuycokJ22i3H9JUkkWjq+KkqRRTBnoSeYApwNHAguA45IsmKTc7sCJwLfHXUlJ0tRG2UM/BFhZVauq6i7gbOCYScq9DTgN+NUY6ydJGtEogT4fWDMwvraf9htJngDsV1Wf39SKkhyfZHmS5evWrZt2ZSVJw834pGiSnYB3A6+ZqmxVnVFVC6tq4bx582a6aUnSgFEC/QZgv4HxfftpG+0OHAxclGQ1cBiwxBOjkrR1jRLoy4CDkhyYZBfgWGDJxplVdVtV7V1VB1TVAcAlwNFVtXyL1FiSNKkpA72qNgAnABcA1wLnVNWKJKckOXpLV1CSNJq5oxSqqqXA0gnT3jyk7BEzr5Ykabq8U1SSGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWrESM9y2d4ccNIm/x+N5q0+9dnbugqStkPuoUtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGGOiS1AgDXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEaMFOhJFiW5LsnKJCdNMv/VSa5JclWSf0uy//irKknalCkDPckc4HTgSGABcFySBROKXQ4srKo/BM4F3jnuikqSNm2UPfRDgJVVtaqq7gLOBo4ZLFBVF1bVL/rRS4B9x1tNSdJURgn0+cCagfG1/bRhXgZ8YbIZSY5PsjzJ8nXr1o1eS0nSlMZ6UjTJi4CFwLsmm19VZ1TVwqpaOG/evHFuWpJmvbkjlLkB2G9gfN9+2n0keQbwRuCpVXXneKonSRrVKIG+DDgoyYF0QX4s8BeDBZI8Hng/sKiqbhp7LSU15YCTPr+tq7BNrT712VtkvVN2uVTVBuAE4ALgWuCcqlqR5JQkR/fF3gXsBnwqyRVJlmyR2kqShhplD52qWgosnTDtzQPDzxhzvSRJ0+SdopLUCANdkhphoEtSIwx0SWqEgS5JjTDQJakRBrokNcJAl6RGjHRjkaR7zfbb1mHL3bqumXEPXZIaYaBLUiMMdElqhIEuSY0w0CWpEQa6JDXCQJekRhjoktQIA12SGmGgS1IjDHRJaoSBLkmNMNAlqREGuiQ1wkCXpEYY6JLUCANdkhphoEtSIwx0SWqEgS5JjfA/iZ6FZvt/cux/cKxWuYcuSY0w0CWpEQa6JDXCQJekRhjoktSIkQI9yaIk1yVZmeSkSeY/IMkn+/nfTnLAuCsqSdq0KQM9yRzgdOBIYAFwXJIFE4q9DFhfVY8A/hE4bdwVlSRt2ih76IcAK6tqVVXdBZwNHDOhzDHAmf3wucDTk2R81ZQkTWWUG4vmA2sGxtcChw4rU1UbktwG7AXcPFgoyfHA8f3oHUmu25xKbwf2ZsJ725qy4x//2H4zZxvOzI7cfvsPm7FV7xStqjOAM7bmNreEJMurauG2rseOyvabOdtwZlptv1G6XG4A9hsY37efNmmZJHOBPYBbxlFBSdJoRgn0ZcBBSQ5MsgtwLLBkQpklwEv64ecDX62qGl81JUlTmbLLpe8TPwG4AJgDfLiqViQ5BVheVUuADwEfS7IS+Cld6Ldsh+822sZsv5mzDWemyfaLO9KS1AbvFJWkRhjoktSIWRPoST6c5KYk393WddneTdVWSfZLcmGSa5KsSHLi1q7j9myE9ts1yXeSXNm331u3dh23Z6P+VpPMSXJ5kvO3Vt22d7Mm0IHFwKKZrqS/LLN1i9l0W20AXlNVC4DDgL+a5HEQI2uwTRez6fa7E3haVT0WeBywKMlhm7ux/vEcLVnMaL/VE4FrZ7qxltpv1gR6VX2d7gqcoZL8Xf8Qsm8mOSvJ3/TTL0ryT0mWAycmeW7/ELLLk3wlye/25U5OcmaSbyT5YZI/TfLOJFcn+WKSnftyp/Z7t1cl+V9b+r1P11RtVVU/qarL+uHb6X5U8ycrOxvbdIT2q6q6ox/duX/d7+qEJDsleW+S7yX5cpKlSZ7fz1ud5LQklwF/luQVSZb1e/3nJXlQX25xkv+b5JIkq5Ic0e8BX5tkcV9mTl/uu327/s/xtsj0jPhb3Rd4NvDBTZSZfe1XVbPmBRwAfHfIvCcBVwC7ArsD3wf+pp93EfDegbK/zb1XCL0c+Id++GTgm3Q/0McCvwCO7Od9Bnge3SMRrhtYfs9t3S7TbatJyv0I+C3bdPT2o7sE+ArgDuC0IWWeDyyl2/H6T8B64Pn9vNXA6wbK7jUw/HbgVf3wYrrnL4XumUs/A/6gX+eldEcITwS+PLD8jtB+5/b1PgI43/brXrNmD30EhwP/WlW/qm6v83MT5n9yYHhf4IIkVwOvBR4zMO8LVfVr4Gq6H+0X++lX031JbwN+BXwoyZ/SBdQOKcluwHnAX1fVzyYpYpsOUVV3V9Xj6N73IUkOnqTYk4FPVdU9VfUfwIUT5g
+238H9UczVwAu5b/t9rrqkuRq4saqurqp7gBV07bcKeHiS9yRZRBda260kzwFuqqpLpyg669pv1gZ6uhN7V/SvV46wyM8Hht8D/J+q+gPgv9HtgW50J0D/gf+6/yIA3APMraoNdE+wPBd4DveG03ZrsrbquzrOA/6lqj49rNwUZkWbbqpdqupWuqBZlOTQgXJHj7DqwfZbDJzQt99bmaT96NrrzoHpG9tvPd3Rz0XAK9lEN8a2MEn7HQ4cnWQ13d7z05J83Pbbyg/n2p5U1Rq6wyUAkjwJeH+Sv6drl+cw/G6yPbj3eTYvGVJmUv1e7YOqammSi+n+um/XJmmr0N0dfG1VvXsT5WxTJm2XeXR/mG5N8kDgmXTdLt+eUO4BwEuSnAnMo+te+MSQzewO/KT/Q/tC7v+8paGS7A3cVVXnpXsC6sen8/62tInt13s9QJIj6LrxXtRPn9XtN2sCPclZdB/o3knWAm+pqg9tnF9Vy5IsAa4CbqQ7vLptyOpOBj6VZD3wVeDAaVRld+Bfk+xK1y/36mm+lS1uqrai20N6MXB1kiv6aW+oqqWD65mtbTpC+z0UODPd1RU7AedU1WSX3p0HPB24hu7x1JcxvP3+Dvg2sK7/d/dpVHk+8JEkG4/YXz+NZcduhPYb1axrP2/9H5Bkt6q6oz/D/XXg+Oqv5tDmsU1nZqD99gK+Axze9wdrBLOt/WbNHvqIzkh3PfWuwJkGz1jYpjNzfpI9gV2At7UcRlvIrGo/99AlqRGz9ioXSWqNgS5JjTDQJakRBrokNcJAl6RG/H+DonqsxNINWwAAAABJRU5ErkJggg==\n",
811
+ "text/plain": [
812
+ "<Figure size 432x288 with 1 Axes>"
813
+ ]
814
+ },
815
+ "metadata": {},
816
+ "output_type": "display_data"
817
+ }
818
+ ],
819
+ "source": [
820
+ "from nltk.translate.bleu_score import corpus_bleu\n",
821
+ "import matplotlib.pyplot as plt\n",
822
+ "\n",
823
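+ "# Plot the cumulative 1- to 4-gram BLEU scores computed in the previous cell\n",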
+ "plt.bar(x = bleu_dic.keys(), height = bleu_dic.values())\n",
824
+ "plt.title(\"BLEU Score with the testing set\")\n",
825
+ "plt.ylim((0,1))\n",
826
+ "plt.show()"
827
+ ]
828
+ },
829
+ {
830
+ "cell_type": "code",
831
+ "execution_count": null,
832
+ "metadata": {
833
+ "colab": {
834
+ "base_uri": "https://localhost:8080/"
835
+ },
836
+ "id": "ThXu38YNo6LQ",
837
+ "outputId": "540b3456-f792-4b1e-d605-c7d320bf5729"
838
+ },
839
+ "outputs": [
840
+ {
841
+ "name": "stdout",
842
+ "output_type": "stream",
843
+ "text": [
844
+ "Mounted at /content/drive/\n"
845
+ ]
846
+ }
847
+ ],
848
+ "source": [
849
+ "from google.colab import drive\n",
850
+ "\n",
851
+ "drive.mount(\"/content/drive/\")\n",
852
+ "\n",
853
+ "#files.download(\"/content/model4_final_reuse_2.h5\")\n"
854
+ ]
855
+ },
856
+ {
857
+ "cell_type": "code",
858
+ "execution_count": null,
859
+ "metadata": {
860
+ "colab": {
861
+ "base_uri": "https://localhost:8080/",
862
+ "height": 425
863
+ },
864
+ "id": "6WX179UgqihS",
865
+ "outputId": "7ba02150-d3d8-4bbc-8186-015ad7c1e92e"
866
+ },
867
+ "outputs": [
868
+ {
869
+ "ename": "ValueError",
870
+ "evalue": "ignored",
871
+ "output_type": "error",
872
+ "traceback": [
873
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
874
+ "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
875
+ "\u001b[0;32m<ipython-input-15-38f909bf98ba>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDropout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0.5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSimpleRNN\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlatent_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mactivation\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'relu'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'rnn_decoder2'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSimpleRNN\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlatent_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mactivation\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'relu'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'rnn_decoder3'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 17\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0mtarget_next_step\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTimeDistributed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDense\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvocab_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mactivation\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"softmax\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'output'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
876
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/layers/recurrent.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs, initial_state, constants, **kwargs)\u001b[0m\n\u001b[1;32m 677\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 678\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0minitial_state\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mconstants\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 679\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mRNN\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 680\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 681\u001b[0m \u001b[0;31m# If any of `initial_state` or `constants` are specified and are Keras\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
877
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint: disable=broad-except\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 66\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 67\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwith_traceback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfiltered_tb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 68\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0;32mdel\u001b[0m \u001b[0mfiltered_tb\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
878
+ "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py\u001b[0m in \u001b[0;36massert_input_compatibility\u001b[0;34m(input_spec, inputs, layer_name)\u001b[0m\n\u001b[1;32m 212\u001b[0m \u001b[0mndim\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrank\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 213\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mndim\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0mspec\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mndim\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 214\u001b[0;31m raise ValueError(f'Input {input_index} of layer \"{layer_name}\" '\n\u001b[0m\u001b[1;32m 215\u001b[0m \u001b[0;34m'is incompatible with the layer: '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 216\u001b[0m \u001b[0;34mf'expected ndim={spec.ndim}, found ndim={ndim}. '\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
879
+ "\u001b[0;31mValueError\u001b[0m: Input 0 of layer \"rnn_decoder3\" is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: (None, 1024)"
880
+ ]
881
+ }
882
+ ],
883
+ "source": [
884
+ "source = keras.Input(shape=(None,), dtype=\"int64\", name=\"source\")\n",
885
+ "\n",
886
+ "# x = layers.Embedding(vocab_size, embed_dim, mask_zero=True)(source)\n",
887
+ "x = layers.Embedding(vocab_size, embed_dim, weights=[embedding_matrix], mask_zero=True,\n",
888
+ " name='embed_encoder', trainable=False)(source)\n",
889
+ "\n",
890
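+ "# The encoder's final hidden state (encoded_source) is used below to initialize the decoder RNN\n",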
+ "encoded_source = layers.SimpleRNN(latent_dim, activation='relu', name='rnn_encoder4')(x)\n",
891
+ "\n",
892
+ "past_target = keras.Input(shape=(None,), dtype=\"int64\", name=\"target\")\n",
893
+ "x = layers.Embedding(vocab_size, embed_dim, mask_zero=True, name='embed_decoder')(past_target)\n",
894
+ "\n",
895
+ "decoder_gru = layers.SimpleRNN(latent_dim, return_sequences=True, activation='relu', name='rnn_decoder1')\n",
896
+ "x = decoder_gru(x, initial_state=encoded_source)\n",
897
+ "x = layers.Dropout(0.5)(x)\n",
898
+ "x = layers.SimpleRNN(latent_dim, activation='relu', name='rnn_decoder2')(x)\n",
899
+ "x = layers.SimpleRNN(latent_dim, return_sequences=True, activation='relu', name='rnn_decoder3')(x)\n",
900
+ "\n",
901
+ "target_next_step = layers.TimeDistributed(layers.Dense(vocab_size, activation=\"softmax\", name='output'))(x)\n",
902
+ "\n",
903
+ "seq2seq_rnn = keras.Model([source, past_target], target_next_step)\n",
904
+ "\n",
905
+ "seq2seq_rnn.compile(\n",
906
+ " optimizer=\"rmsprop\",\n",
907
+ " loss=\"sparse_categorical_crossentropy\",\n",
908
+ " metrics=[\"accuracy\"])"
909
+ ]
910
+ },
911
+ {
912
+ "cell_type": "code",
913
+ "execution_count": null,
914
+ "metadata": {
915
+ "colab": {
916
+ "base_uri": "https://localhost:8080/",
917
+ "height": 222
918
+ },
919
+ "id": "NvlZdKQ2qk6a",
920
+ "outputId": "c6dd1258-df31-4b5a-e79d-91e2d83f21af"
921
+ },
922
+ "outputs": [
923
+ {
924
+ "ename": "NameError",
925
+ "evalue": "ignored",
926
+ "output_type": "error",
927
+ "traceback": [
928
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
929
+ "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
930
+ "\u001b[0;32m<ipython-input-1-26deb6ad3f93>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;31m# mode='min')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0mseq2seq_rnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_ds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalidation_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mval_ds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcheckpoint\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0;31m# seq2seq_rnn.save('model4_final_ME_2.h5')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
931
+ "\u001b[0;31mNameError\u001b[0m: name 'seq2seq_rnn' is not defined"
932
+ ]
933
+ }
934
+ ],
935
+ "source": [
936
+ "vocab_size = 30000\n",
937
+ "sequence_length = 20\n",
938
+ "\n",
939
+ "# checkpoint = ModelCheckpoint(filepath='model4_ME_2.h5',\n",
940
+ "# monitor='val_loss', verbose=1, save_best_only=True,\n",
941
+ "# mode='min')\n",
942
+ "\n",
943
+ "seq2seq_rnn.fit(train_ds, epochs=3, validation_data=val_ds, callbacks=[checkpoint])\n",
944
+ "# seq2seq_rnn.save('model4_final_ME_2.h5')\n"
945
+ ]
946
+ },
947
+ {
948
+ "cell_type": "markdown",
949
+ "metadata": {
950
+ "id": "BRc0Z_ELHpdD"
951
+ },
952
+ "source": [
953
+ "### **The Codes after this are from prior experiment.**"
954
+ ]
955
+ },
956
+ {
957
+ "cell_type": "code",
958
+ "execution_count": null,
959
+ "metadata": {
960
+ "id": "stJYZc1eh2Kw"
961
+ },
962
+ "outputs": [],
963
+ "source": [
964
+ "def tokenize(x):\n",
965
+ " \"\"\"\n",
966
+ " Tokenize x\n",
967
+ " :param x: List of sentences/strings to be tokenized\n",
968
+ " :return: Tuple of (tokenized x data, tokenizer used to tokenize x)\n",
969
+ " \"\"\"\n",
970
+ " # TODO: Implement\n",
971
+ " tokenizer = Tokenizer()\n",
972
+ " tokenizer.fit_on_texts(x)\n",
973
+ " return tokenizer.texts_to_sequences(x), tokenizer"
974
+ ]
975
+ },
976
+ {
977
+ "cell_type": "code",
978
+ "execution_count": null,
979
+ "metadata": {
980
+ "id": "CVZgVHkzh3tr"
981
+ },
982
+ "outputs": [],
983
+ "source": [
984
+ "def pad(x, length=None):\n",
985
+ " \"\"\"\n",
986
+ " Pad x\n",
987
+ " :param x: List of sequences.\n",
988
+ " :param length: Length to pad the sequence to. If None, use length of longest sequence in x.\n",
989
+ " :return: Padded numpy array of sequences\n",
990
+ " \"\"\"\n",
991
+ "# # TODO: Implement\n",
992
+ "# if length is None:\n",
993
+ "# length=max([len(sentence) for sentence in x])\n",
994
+ "# print(length)\n",
995
+ " \n",
996
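+ "    # maxlen is fixed at 20 (the sequence length used throughout the notebook); the length argument above is currently ignored\n",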
+ " return pad_sequences(x, maxlen=20, padding ='post')"
997
+ ]
998
+ },
999
+ {
1000
+ "cell_type": "code",
1001
+ "execution_count": null,
1002
+ "metadata": {
1003
+ "id": "msE26JGmh5TA"
1004
+ },
1005
+ "outputs": [],
1006
+ "source": [
1007
+ "import collections\n",
1008
+ "\n",
1009
+ "english_words_counter = collections.Counter([word for sentence in english_sentences for word in sentence.split()])\n",
1010
+ "french_words_counter = collections.Counter([word for sentence in french_sentences for word in sentence.split()])\n",
1011
+ "\n",
1012
+ "print('{} English words.'.format(len([word for sentence in english_sentences for word in sentence.split()])))\n",
1013
+ "print('{} unique English words.'.format(len(english_words_counter)))\n",
1014
+ "print('10 Most common words in the English dataset:')\n",
1015
+ "print('\"' + '\" \"'.join(list(zip(*english_words_counter.most_common(10)))[0]) + '\"')\n",
1016
+ "print()\n",
1017
+ "print('{} French words.'.format(len([word for sentence in french_sentences for word in sentence.split()])))\n",
1018
+ "print('{} unique French words.'.format(len(french_words_counter)))\n",
1019
+ "print('10 Most common words in the French dataset:')\n",
1020
+ "print('\"' + '\" \"'.join(list(zip(*french_words_counter.most_common(10)))[0]) + '\"')"
1021
+ ]
1022
+ },
1023
+ {
1024
+ "cell_type": "code",
1025
+ "execution_count": null,
1026
+ "metadata": {
1027
+ "id": "GHOxz_1Fh7Ha"
1028
+ },
1029
+ "outputs": [],
1030
+ "source": [
1031
+ "for sample_i in range(5):\n",
1032
+ " print('English sample {}: {}'.format(sample_i + 1, english_sentences[sample_i+10000]))\n",
1033
+ " print('French sample {}: {}\\n'.format(sample_i + 1, french_sentences[sample_i+10000]))\n",
1034
+ " print('German sample {}: {}\\n'.format(sample_i + 1, german_sentences[sample_i+10000]))\n",
1035
+ " print('Italian sample {}: {}\\n'.format(sample_i + 1, italian_sentences[sample_i+10000]))\n"
1036
+ ]
1037
+ },
1038
+ {
1039
+ "cell_type": "code",
1040
+ "execution_count": null,
1041
+ "metadata": {
1042
+ "id": "ogGPGCf7h9Gw"
1043
+ },
1044
+ "outputs": [],
1045
+ "source": [
1046
+ "def preprocess(x, y1, y2, y3):\n",
1047
+ " \"\"\"\n",
1048
+ " Preprocess x and y\n",
1049
+ " :param x: Feature List of sentences\n",
1050
+ " :param y: Label List of sentences\n",
1051
+ " :return: Tuple of (Preprocessed x, Preprocessed y, x tokenizer, y tokenizer)\n",
1052
+ " \"\"\"\n",
1053
+ " preprocess_en, en_tk = tokenize(x)\n",
1054
+ " preprocess_fr, fr_tk = tokenize(y1)\n",
1055
+ " preprocess_de, de_tk = tokenize(y2)\n",
1056
+ " preprocess_it, it_tk = tokenize(y3)\n",
1057
+ " \n",
1058
+ " preprocess_en = pad(preprocess_en)\n",
1059
+ " preprocess_fr = pad(preprocess_fr)\n",
1060
+ " preprocess_de = pad(preprocess_de)\n",
1061
+ " preprocess_it = pad(preprocess_it)\n",
1062
+ "\n",
1063
+ " \n",
1064
+ " # Keras's sparse_categorical_crossentropy function requires the labels to be in 3 dimensions\n",
1065
+ " preprocess_fr = preprocess_fr.reshape(*preprocess_fr.shape, 1)\n",
1066
+ " preprocess_de = preprocess_de.reshape(*preprocess_de.shape, 1)\n",
1067
+ " preprocess_it = preprocess_it.reshape(*preprocess_it.shape, 1)\n",
1068
+ "\n",
1069
+ " return preprocess_en,preprocess_fr,preprocess_de,preprocess_it, en_tk, fr_tk, de_tk, it_tk\n",
1070
+ "\n",
1071
+ "inputTimestep = 30\n",
1072
+ "outputTimestep = 30\n",
1073
+ "\n",
1074
+ "preproc_english_sentences,preproc_french_sentences, preproc_german_sentences, preproc_italian_sentences, en_tokenizer,fr_tokenizer, de_tokenizer, it_tokenizer =\\\n",
1075
+ " preprocess(english_sentences, french_sentences,german_sentences,italian_sentences )\n",
1076
+ "\n",
1077
+ "\n",
1078
+ "max_english_sequence_length = preproc_english_sentences.shape[1]\n",
1079
+ "max_french_sequence_length = preproc_french_sentences.shape[1]\n",
1080
+ "max_german_sequence_length = preproc_german_sentences.shape[1]\n",
1081
+ "max_italian_sequence_length = preproc_italian_sentences.shape[1]\n",
1082
+ "\n",
1083
+ "english_vocab_size = len(en_tokenizer.word_index)\n",
1084
+ "french_vocab_size = len(fr_tokenizer.word_index)\n",
1085
+ "german_vocab_size = len(de_tokenizer.word_index)\n",
1086
+ "italian_vocab_size = len(it_tokenizer.word_index)\n",
1087
+ "\n",
1088
+ "print('Data Preprocessed')\n",
1089
+ "\n",
1090
+ "print(\"Max English sentence length:\", max_english_sequence_length)\n",
1091
+ "print(\"Max French sentence length:\", max_french_sequence_length)\n",
1092
+ "print(\"Max German sentence length:\", max_german_sequence_length)\n",
1093
+ "print(\"Max Italian sentence length:\", max_italian_sequence_length)\n",
1094
+ "\n",
1095
+ "print(\"English vocabulary size:\", english_vocab_size)\n",
1096
+ "print(\"French vocabulary size:\", french_vocab_size)\n",
1097
+ "print(\"German vocabulary size:\", german_vocab_size)\n",
1098
+ "print(\"Italian vocabulary size:\", italian_vocab_size)"
1099
+ ]
1100
+ },
1101
+ {
1102
+ "cell_type": "code",
1103
+ "execution_count": null,
1104
+ "metadata": {
1105
+ "id": "zdLpnbLHiEk3"
1106
+ },
1107
+ "outputs": [],
1108
+ "source": [
1109
+ "from keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional, Dropout, LSTM\n",
1110
+ "from keras.losses import sparse_categorical_crossentropy\n",
1111
+ "from keras.models import Sequential\n",
1112
+ "from keras.layers import Dense, Activation, TimeDistributed, RepeatVector, Flatten, Conv2D, Embedding\n",
1113
+ "from keras.layers.recurrent import SimpleRNN, LSTM\n",
1114
+ "from keras.utils import np_utils\n",
1115
+ "from tensorflow.keras.models import Model\n",
1116
+ "import keras\n",
1117
+ "\n",
1118
+ "\n",
1119
+ "def many_many_tangled(input_shape, fr_output_sequence_length, english_vocab_size, french_vocab_size):\n",
1120
+ "\n",
1121
+ "\n",
1122
+ " # Hyperparameters\n",
1123
+ " opt = tf.keras.optimizers.Adam(learning_rate=1e-3)\n",
1124
+ " \n",
1125
+ " # Build the layers \n",
1126
+ " model = Sequential()\n",
1127
+ " # Embedding\n",
1128
+ " model.add(Embedding(english_vocab_size, 256, input_length=input_shape[1],\n",
1129
+ " input_shape=input_shape[1:]))\n",
1130
+ " # Encoder\n",
1131
+ " model.add(SimpleRNN(256))\n",
1132
+ " model.add(RepeatVector(fr_output_sequence_length))\n",
1133
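+ "    # RepeatVector copies the encoder's fixed-size summary once per output time step to feed the decoder\n",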
+ " # Decoder\n",
1134
+ " model.add(SimpleRNN(256, return_sequences=True))\n",
1135
+ " model.add(TimeDistributed(Dense(512, activation='relu')))\n",
1136
+ " model.add(Dropout(0.5))\n",
1137
+ " model.add(TimeDistributed(Dense((french_vocab_size), activation='softmax')))\n",
1138
+ " model.compile(loss=sparse_categorical_crossentropy,\n",
1139
+ " optimizer=opt,\n",
1140
+ " metrics=['accuracy'])\n",
1141
+ " \n",
1142
+ " print(model.summary())\n",
1143
+ "\n",
1144
+ " return model\n",
1145
+ "\n",
1146
+ "def many_many_functional(input_shape, output_sequence_length, english_vocab_size, french_vocab_size, german_vocab_size,italian_vocab_size):\n",
1147
+ " \n",
1148
+ " #input\n",
1149
+ " eng_input = Input(shape=(None,), dtype=\"int64\", name=\"english\")\n",
1150
+ "\n",
1151
+ " #embedding\n",
1152
+ " embedding_layer = Embedding(english_vocab_size, 256)(eng_input)\n",
1153
+ "\n",
1154
+ " rnn_layer_1 = SimpleRNN(256)(embedding_layer)\n",
1155
+ "\n",
1156
+ " fr_input = Input(shape=(None,), dtype=\"int64\", name=\"spanish\")\n",
1157
+ "\n",
1158
+ " embedding_layer = Embedding(french_vocab_size, 256)(fr_input)\n",
1159
+ "\n",
1160
+ " #Encoder for two langauges\n",
1161
+ " rnn_layer_1 = SimpleRNN(256)(embedding_layer)\n",
1162
+ " repeat_vector = RepeatVector(output_sequence_length)(rnn_layer_1)\n",
1163
+ "\n",
1164
+ " #Common decoder for all languages\n",
1165
+ " rnn_layer2 = SimpleRNN(256, return_sequences=True)(repeat_vector)\n",
1166
+ " time_distributed_1 = Dense(1024, activation='relu')(rnn_layer2)\n",
1167
+ " dropout_1 = Dropout(0.5)(time_distributed_1)\n",
1168
+ " \n",
1169
+ " output_fr = Dense(french_vocab_size, activation='softmax')(dropout_1)\n",
1170
+ " #output_de = TimeDistributed(Dense(german_vocab_size, activation='softmax'))(dropout_1)\n",
1171
+ " #output_it = TimeDistributed(Dense(italian_vocab_size, activation='softmax'))(dropout_1)\n",
1172
+ " \n",
1173
+ " #Create model\n",
1174
+ " #model = Model(inputs=eng_input, outputs=[output_fr,output_de,output_it])\n",
1175
+ " model = Model(inputs=[eng_input, fr_input], outputs=output_fr)\n",
1176
+ "\n",
1177
+ "\n",
1178
+ " model.compile(loss=sparse_categorical_crossentropy, optimizer='adam',metrics=['accuracy'])\n",
1179
+ "\n",
1180
+ " print(model.summary())\n",
1181
+ " \n",
1182
+ " return model\n",
1183
+ "\n",
1184
+ "def embed_model(output_sequence_length, english_vocab_size, french_vocab_size):\n",
1185
+ "\n",
1186
+ " # Hyperparameters\n",
1187
+ " opt = tf.keras.optimizers.Adam(learning_rate=1e-3)\n",
1188
+ " # Build the layers \n",
1189
+ " model = Sequential()\n",
1190
+ " # Embedding\n",
1191
+ " model.add(Embedding(english_vocab_size, 256))\n",
1192
+ " # Encoder\n",
1193
+ " model.add(SimpleRNN(256))\n",
1194
+ " model.add(RepeatVector(output_sequence_length))\n",
1195
+ " # Decoder\n",
1196
+ " model.add(SimpleRNN(256, return_sequences=True))\n",
1197
+ " model.add(TimeDistributed(Dense(516, activation='relu')))\n",
1198
+ " model.add(Dropout(0.5))\n",
1199
+ " model.add(TimeDistributed(Dense(516, activation='relu')))\n",
1200
+ " model.add(Dropout(0.5))\n",
1201
+ " model.add(TimeDistributed(Dense(french_vocab_size, activation='softmax')))\n",
1202
+ " model.compile(loss=sparse_categorical_crossentropy,\n",
1203
+ " optimizer=opt,\n",
1204
+ " metrics=['accuracy'])\n",
1205
+ " return model\n",
1206
+ "\n"
1207
+ ]
1208
+ },
1209
+ {
1210
+ "cell_type": "code",
1211
+ "execution_count": null,
1212
+ "metadata": {
1213
+ "id": "VdDy5uL1t8Sy"
1214
+ },
1215
+ "outputs": [],
1216
+ "source": []
1217
+ },
1218
+ {
1219
+ "cell_type": "code",
1220
+ "execution_count": null,
1221
+ "metadata": {
1222
+ "id": "CeQ4Z6j1spTP"
1223
+ },
1224
+ "outputs": [],
1225
+ "source": []
1226
+ },
1227
+ {
1228
+ "cell_type": "code",
1229
+ "execution_count": null,
1230
+ "metadata": {
1231
+ "id": "Qr25F675iIyX"
1232
+ },
1233
+ "outputs": [],
1234
+ "source": [
1235
+ "# tmp_x = pad(preproc_english_sentences, preproc_french_sentences.shape[1])\n",
1236
+ "# tmp_x = tmp_x.reshape((-1, preproc_french_sentences.shape[-2]))\n",
1237
+ "\n",
1238
+ "reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,\n",
1239
+ " patience=5, min_lr=0.001)\n",
1240
+ "\n",
1241
+ "callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)\n",
1242
+ "# TODO: Train the neural network\n",
1243
+ "\n",
1244
+ "many_many = many_many_functional(\n",
1245
+ " None,\n",
1246
+ " 20,\n",
1247
+ " en_vocab_size+1,\n",
1248
+ " fr_vocab_size+1,\n",
1249
+ " None, None)\n",
1250
+ "\n",
1251
+ "many_many.summary()\n",
1252
+ "\n",
1253
+ "many_many.fit(train_ds, validation_data=val_ds, batch_size=64, epochs=30, callbacks=[callback, reduce_lr])"
1254
+ ]
1255
+ },
1256
+ {
1257
+ "cell_type": "code",
1258
+ "execution_count": null,
1259
+ "metadata": {
1260
+ "id": "Zt2YCfXosq05"
1261
+ },
1262
+ "outputs": [],
1263
+ "source": [
1264
+ "def logits_to_text(logits, tokenizer):\n",
1265
+ " \"\"\"\n",
1266
+ " Turn logits from a neural network into text using the tokenizer\n",
1267
+ " :param logits: Logits from a neural network\n",
1268
+ " :param tokenizer: Keras Tokenizer fit on the labels\n",
1269
+ " :return: String that represents the text of the logits\n",
1270
+ " \"\"\"\n",
1271
+ " index_to_words = {id: word for word, id in tokenizer.word_index.items()}\n",
1272
+ " index_to_words[0] = '<PAD>'\n",
1273
+ "\n",
1274
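+ "    # argmax over the vocabulary axis picks one word id per time step; the ids are then mapped back to words\n",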
+ " return ' '.join([index_to_words[prediction] for prediction in np.argmax(logits, 1)])\n",
1275
+ "\n",
1276
+ "print('`logits_to_text` function loaded.')"
1277
+ ]
1278
+ },
1279
+ {
1280
+ "cell_type": "code",
1281
+ "execution_count": null,
1282
+ "metadata": {
1283
+ "id": "AS88MAqtstIC"
1284
+ },
1285
+ "outputs": [],
1286
+ "source": [
1287
+ "# Print prediction(s)\n",
1288
+ "print(\"Prediction:\")\n",
1289
+ "\n",
1290
+ "print(logits_to_text(many_many.predict(tmp_x[6:7])[0], fr_tokenizer))\n",
1291
+ "\n",
1292
+ "print(\"\\nCorrect Translation French:\")\n",
1293
+ "print(french_sentences[6:7])\n",
1294
+ "\n",
1295
+ "print(\"\\nOriginal text:\")\n",
1296
+ "print()"
1297
+ ]
1298
+ }
1299
+ ],
1300
+ "metadata": {
1301
+ "accelerator": "GPU",
1302
+ "colab": {
1303
+ "machine_shape": "hm",
1304
+ "provenance": []
1305
+ },
1306
+ "gpuClass": "standard",
1307
+ "kernelspec": {
1308
+ "display_name": "Python 3",
1309
+ "name": "python3"
1310
+ },
1311
+ "language_info": {
1312
+ "name": "python"
1313
+ }
1314
+ },
1315
+ "nbformat": 4,
1316
+ "nbformat_minor": 0
1317
+ }
language_models/lstm/training script/.DS_Store ADDED
Binary file (6.15 kB). View file
 
language_models/vanilla_rnn/motivating example models/.DS_Store ADDED
Binary file (6.15 kB). View file
 
language_models/vanilla_rnn/motivating example models/problem1/original_problem1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33bff93b68beb81d69eaa91f563eead037f4279934e9c7a76a9f6d5aceb1a652
3
+ size 438911736
language_models/vanilla_rnn/motivating example models/problem1/solution1_problem1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9389ca9eaeb58d3faf80fd6e263e7564bdda7fbc0e0ee29a2584b57e4d606fef
3
+ size 438911712
language_models/vanilla_rnn/motivating example models/problem1/solution2_problem1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2e7ae72ab4ca1d09ee686d252ee560c89973a8e1fd4f6a5c6781f98441db458
3
+ size 438911712
language_models/vanilla_rnn/motivating example models/problem2/original_problem2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4938d3a6be16e2dae5c2d9aa600ffbcb8c503a3a7bf7bce800b3bc91d0719864
3
+ size 438911736
language_models/vanilla_rnn/motivating example models/problem2/solution1_problem2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e11189c864a9b76e8d1574de90d6651ce3b16513b89eb607a2bcdbc2703199c6
3
+ size 438911712
language_models/vanilla_rnn/motivating example models/problem2/solution2_problem2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c539a9274240f3fa281bfb5f7b1ce68d9c08096e9b2f5dc712b2e62696df5fd
3
+ size 438911712
language_models/vanilla_rnn/reuse models/model4_de_fr.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ee9e1beb8bb17f529b0fb1b0d5a78bcfc3974c0c9e1456f7a0dffabb46646fa
3
+ size 438911712
language_models/vanilla_rnn/reuse models/model4_de_it.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72f7d12ea76cb2dafb7c2a62489f958007e663ded5eae27acf7dc9469f004025
3
+ size 438911712
language_models/vanilla_rnn/reuse models/model4_fr_it.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e03ed8172d5f376e59583e97a82fd77d8bd27825e5091eac6e8d9e1987c89af
3
+ size 438911712
language_models/vanilla_rnn/rq1 models/model1.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8637147ae3371c50cc1488264a567360d04c708a8d67ec0a56b2efe7d740c54e
3
+ size 338125960
language_models/vanilla_rnn/rq1 models/model2.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fff06c0f77191431baca5ef487f4fd0b331b3c53e02cbe0e9a83e20cb3e0b348
3
+ size 371721312