# -*- coding: utf-8 -*-
"""Tensorflow Onboarding Assignment

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/1GdukSQWikhlaW7FA26FGdPj8pJKWJuhI

# Tensorflow Onboarding Assignment

Follows the TensorFlow beginner quickstart:
https://www.tensorflow.org/tutorials/quickstart/beginner

The actual assignment begins below. For background on logits, see:
https://developers.google.com/machine-learning/glossary#logits
"""
import tensorflow

# Load the MNIST dataset of handwritten digits
mnist = tensorflow.keras.datasets.mnist
(xTrain, yTrain), (xTest, yTest) = mnist.load_data()
# Scale pixel values from the 0-255 integer range to floats in [0, 1]
xTrain, xTest = xTrain / 255.0, xTest / 255.0
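"""A quick shape check (an illustrative addition, not in the original
notebook): MNIST ships 60,000 training and 10,000 test grayscale images,
each 28x28 pixels.
"""
print(xTrain.shape, yTrain.shape)  # (60000, 28, 28) (60000,)
print(xTest.shape, yTest.shape)    # (10000, 28, 28) (10000,)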
# Build a Keras Sequential model: flatten each 28x28 image into 784 features,
# one hidden ReLU layer, dropout for regularization, and a 10-unit output
# layer that emits raw logits (one per digit class)
model = tensorflow.keras.models.Sequential([
    tensorflow.keras.layers.Flatten(input_shape=(28, 28)),
    tensorflow.keras.layers.Dense(128, activation='relu'),
    tensorflow.keras.layers.Dropout(0.2),
    tensorflow.keras.layers.Dense(10)
])
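"""Illustrative addition (not in the original notebook): `model.summary()`
is a handy sanity check on the architecture. Flatten yields 784 features, so
Dense(128) holds 784*128 + 128 = 100,480 parameters and Dense(10) holds
128*10 + 10 = 1,290, for 101,770 trainable weights in total.
"""
model.summary()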
# The model returns a vector of raw logits (log-odds), one per class
predictions = model(xTrain[:1]).numpy()
print(predictions)
"""Output:
```
array([[-0.03457962, -0.9436625 , -0.22148427, -0.19330877, -0.02459322,
-0.46619663, 0.14027654, -0.35726148, 0.12779313, -0.6115787 ]],
dtype=float32)
```
"""
# Softmax converts the logits into per-class probabilities
print(tensorflow.nn.softmax(predictions).numpy())
"""output:
```
array([[0.11907175, 0.04797325, 0.09877274, 0.10159529, 0.12026681,
0.07733212, 0.14182338, 0.08623227, 0.14006394, 0.06686845]],
dtype=float32)
```
"""
# SparseCategoricalCrossentropy takes integer labels; with from_logits=True
# it applies softmax internally before computing the cross-entropy
lossFn = tensorflow.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
print(lossFn(yTrain[:1], predictions).numpy())
"""Untrained model, probability should be close to random. Expected to be close to `2.3`
output:
```
2.559646
```
"""
# Configure training: Adam optimizer, the loss defined above, accuracy metric
model.compile(optimizer='adam', loss=lossFn, metrics=['accuracy'])
# Train for 5 epochs over the full training set
model.fit(xTrain, yTrain, epochs=5)
"""output:
```
Epoch 1/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.2971 - accuracy: 0.9129
Epoch 2/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.1431 - accuracy: 0.9566
Epoch 3/5
1875/1875 [==============================] - 3s 2ms/step - loss: 0.1072 - accuracy: 0.9674
Epoch 4/5
1875/1875 [==============================] - 3s 2ms/step - loss: 0.0864 - accuracy: 0.9735
Epoch 5/5
1875/1875 [==============================] - 4s 2ms/step - loss: 0.0751 - accuracy: 0.9766
<tensorflow.python.keras.callbacks.History at 0x7f76b841c450>
```
"""
# Evaluate generalization on the held-out test set
model.evaluate(xTest, yTest, verbose=2)
"""Output:
```
313/313 - 0s - loss: 0.0712 - accuracy: 0.9792
[0.07123260200023651, 0.979200005531311]
```
"""
# Wrap the trained model with a Softmax layer so it outputs probabilities
# instead of logits
probabilityModel = tensorflow.keras.Sequential([model, tensorflow.keras.layers.Softmax()])
print(probabilityModel(xTest[:5]))
"""Output:
```
<tf.Tensor: shape=(5, 10), dtype=float32, numpy=
array([[4.03490041e-08, 1.65046803e-08, 3.27245516e-06, 1.94969489e-05,
1.55670633e-11, 1.35958282e-06, 3.94158144e-12, 9.99971867e-01,
9.01632063e-07, 2.86395493e-06],
[2.75707112e-06, 8.71449884e-05, 9.99384403e-01, 5.15065098e-04,
5.02042798e-13, 1.27760063e-06, 2.45117224e-08, 6.04167934e-14,
9.25813947e-06, 2.58006572e-12],
[1.85128748e-08, 9.98399675e-01, 1.10141766e-04, 9.78490880e-06,
2.07783414e-05, 1.32908981e-05, 1.77052589e-05, 1.28791248e-03,
1.40457472e-04, 1.66311224e-07],
[9.97735977e-01, 2.34678879e-07, 1.18870237e-04, 1.90825813e-06,
3.32575428e-06, 3.43895335e-05, 2.07656994e-03, 1.57543636e-05,
2.44256648e-06, 1.04833152e-05],
[1.10252779e-06, 5.55624791e-09, 3.76566550e-06, 7.71379234e-07,
9.93223310e-01, 3.26787983e-07, 1.08721606e-05, 1.28094558e-04,
1.15749217e-05, 6.62025064e-03]], dtype=float32)>
```
"""