Commit 5ca3270

automatic upgrade to tf2 version

1 parent 2324c3b commit 5ca3270


94 files changed: +2301 / -2312 lines changed

.gitignore (+1)

@@ -0,0 +1 @@
+.vscode/settings.json

beginner/conv2d.py (+2 -2)

@@ -14,7 +14,7 @@
kernel = tf.reshape(k, [3, 3, 1, 1], name='kernel')
image = tf.reshape(i, [1, 4, 4, 1], name='image')

-res = tf.squeeze(tf.nn.conv2d(image, kernel, [1, 1, 1, 1], "VALID"))
+res = tf.squeeze(tf.nn.conv2d(input=image, filters=kernel, strides=[1, 1, 1, 1], padding="VALID"))
# VALID means no padding
-with tf.Session() as sess:
+with tf.compat.v1.Session() as sess:
    print(sess.run(res))
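
Note: the converted file keeps the graph-and-session flow via tf.compat.v1; the same computation could also run eagerly in native TF2. A minimal sketch, assuming stand-in values for the i and k tensors that sit outside this hunk:

import tensorflow as tf

# Stand-ins for the i and k tensors defined earlier in the file (assumed).
i = tf.range(16, dtype=tf.float32)
k = tf.ones([9], dtype=tf.float32)

image = tf.reshape(i, [1, 4, 4, 1], name='image')
kernel = tf.reshape(k, [3, 3, 1, 1], name='kernel')

# Eager execution: the op runs immediately, no Session required.
res = tf.squeeze(tf.nn.conv2d(input=image, filters=kernel,
                              strides=[1, 1, 1, 1], padding="VALID"))
print(res.numpy())  # 2x2 output: a 3x3 kernel over a 4x4 image with no padding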

beginner/custom.py (+9 -9)

@@ -2,15 +2,15 @@
import tensorflow as tf

def model_fn(features, labels, mode):
-    W = tf.get_variable("W",[1],tf.float64)
-    b = tf.get_variable("b",[1],tf.float64)
+    W = tf.compat.v1.get_variable("W",[1],tf.float64)
+    b = tf.compat.v1.get_variable("b",[1],tf.float64)
    y = W*features["x"] + b
-    loss = tf.reduce_sum(tf.square(y-labels))
-    global_step = tf.train.get_global_step()
-    optimizer = tf.train.GradientDescentOptimizer(.01)
+    loss = tf.reduce_sum(input_tensor=tf.square(y-labels))
+    global_step = tf.compat.v1.train.get_global_step()
+    optimizer = tf.compat.v1.train.GradientDescentOptimizer(.01)
    train = tf.group(
        optimizer.minimize(loss),
-        tf.assign_add(global_step, 1)
+        tf.compat.v1.assign_add(global_step, 1)
    )
    return tf.estimator.EstimatorSpec(
        mode=mode,

@@ -24,11 +24,11 @@ def model_fn(features, labels, mode):
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7., 0.])

-input_fn = tf.estimator.inputs.numpy_input_fn(
+input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True)
-train_input_fn = tf.estimator.inputs.numpy_input_fn(
+train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    {"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False)
-eval_input_fn = tf.estimator.inputs.numpy_input_fn(
+eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)

estimator = tf.estimator.Estimator(model_fn=model_fn)
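
Note: the converter keeps the custom Estimator model_fn alive through tf.compat.v1, but the same y = W*x + b fit can be expressed with tf.keras in TF2. A hedged sketch, not part of the commit (the training values are borrowed from the neighbouring estimator.py example; the Keras setup is an assumption):

import numpy as np
import tensorflow as tf

x_train = np.array([[1.], [2.], [3.], [4.]])
y_train = np.array([[0.], [-1.], [-2.], [-3.]])

# A single Dense unit computes W*x + b, the same model as model_fn above.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
model.compile(optimizer=tf.keras.optimizers.SGD(0.01), loss='mse')
model.fit(x_train, y_train, epochs=1000, verbose=0)

print(model.get_weights())  # learned kernel (W) and bias (b)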

beginner/dropout.py (-15)

@@ -1,15 +0,0 @@
-import tensorflow as tf
-
-i = tf.constant([
-    [-1, 3, 1, -0.0001],
-    [2, 1, 0, 1],
-    [1, 2, -4, 1],
-    [-9, 1, 0, 2]
-], dtype=tf.float32, name='i')
-
-# b = tf.constant([ 5, 4, 3, 2], dtype=tf.float32)
-
-res = tf.nn.dropout(i, 0.8)
-
-with tf.Session() as sess:
-    print sess.run(res)
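
Note: this file is deleted rather than upgraded. If it were ported by hand, one behavioural detail matters: in TF2, the second argument of tf.nn.dropout is the drop rate, not the keep probability, so the old dropout(i, 0.8) corresponds to rate=0.2. A rough sketch under that assumption:

import tensorflow as tf

i = tf.constant([
    [-1, 3, 1, -0.0001],
    [2, 1, 0, 1],
    [1, 2, -4, 1],
    [-9, 1, 0, 2]
], dtype=tf.float32, name='i')

# keep_prob=0.8 in TF1 becomes rate=0.2 (probability of zeroing an element).
res = tf.nn.dropout(i, rate=0.2)
print(res.numpy())  # eager execution replaces the Session + Python 2 print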

beginner/estimator.py (+4 -4)

@@ -3,16 +3,16 @@

feature_columns = [tf.feature_column.numeric_column('x', shape=[1])]

-estimator = tf.estimator.LinearRegressor(feature_columns)
+estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns, loss_reduction=tf.keras.losses.Reduction.SUM)

train_x = np.array([1., 2., 3., 4.])
train_y = np.array([0., -1., -2., -3.])
eval_x = np.array([2.,5.,8.,1.])
eval_y = np.array([-1.01, -4.1, -7, 0.])

-input_fn = tf.estimator.inputs.numpy_input_fn({'x':train_x}, train_y, batch_size=4, num_epochs=None, shuffle=True)
-train_input_fn = tf.estimator.inputs.numpy_input_fn({'x':train_x}, train_y, batch_size=4, num_epochs=1000, shuffle=False)
-eval_input_fn = tf.estimator.inputs.numpy_input_fn({'x':eval_x}, eval_y, batch_size=4, num_epochs=1000, shuffle=False)
+input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({'x':train_x}, train_y, batch_size=4, num_epochs=None, shuffle=True)
+train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({'x':train_x}, train_y, batch_size=4, num_epochs=1000, shuffle=False)
+eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn({'x':eval_x}, eval_y, batch_size=4, num_epochs=1000, shuffle=False)

estimator.train(input_fn, steps=1000)
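
Note: besides routing numpy_input_fn through compat.v1, the conversion adds loss_reduction=tf.keras.losses.Reduction.SUM, presumably to keep the TF1-style summed loss rather than averaging over the batch. A small, made-up illustration of what that reduction choice means:

import tensorflow as tf

mse_sum = tf.keras.losses.MeanSquaredError(
    reduction=tf.keras.losses.Reduction.SUM)
mse_mean = tf.keras.losses.MeanSquaredError(
    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE)

y_true = tf.constant([[0.], [-1.], [-2.], [-3.]])
y_pred = tf.constant([[0.1], [-0.9], [-2.2], [-2.8]])

print(mse_sum(y_true, y_pred).numpy())   # sum of per-example squared errors
print(mse_mean(y_true, y_pred).numpy())  # average of per-example squared errors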

beginner/getstart.py (+7 -7)

@@ -2,20 +2,20 @@

W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
-x = tf.placeholder(tf.float32)
-y = tf.placeholder(tf.float32)
+x = tf.compat.v1.placeholder(tf.float32)
+y = tf.compat.v1.placeholder(tf.float32)
line_model = W*x+b

-loss = tf.reduce_sum(tf.square(line_model-y))
+loss = tf.reduce_sum(input_tensor=tf.square(line_model-y))

-optimizer = tf.train.GradientDescentOptimizer(.01)
+optimizer = tf.compat.v1.train.GradientDescentOptimizer(.01)
train = optimizer.minimize(loss)

x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]

-sess = tf.Session()
-init = tf.global_variables_initializer()
+sess = tf.compat.v1.Session()
+init = tf.compat.v1.global_variables_initializer()
sess.run(init)

for i in range(1000):

@@ -24,6 +24,6 @@
curr_W, curr_b, curr_loss = sess.run([W,b,loss],{x:x_train, y:y_train})
print("W: %s b: %s loss: %s"%(curr_W,curr_b,curr_loss))

-fw = tf.summary.FileWriter('logdir', graph=sess.graph)
+fw = tf.compat.v1.summary.FileWriter('logdir', graph=sess.graph)
fw.flush()
fw.close()
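
Note: the converted script still builds a graph with placeholders and runs it in a compat.v1.Session. A hedged sketch of the same linear fit in native TF2, where tf.GradientTape replaces GradientDescentOptimizer.minimize and no Session or FileWriter is involved:

import tensorflow as tf

W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x_train = tf.constant([1., 2., 3., 4.])
y_train = tf.constant([0., -1., -2., -3.])

optimizer = tf.keras.optimizers.SGD(0.01)

for _ in range(1000):
    with tf.GradientTape() as tape:
        # Same summed squared-error loss as the graph-mode version above.
        loss = tf.reduce_sum(tf.square(W * x_train + b - y_train))
    grads = tape.gradient(loss, [W, b])
    optimizer.apply_gradients(zip(grads, [W, b]))

print("W: %s b: %s loss: %s" % (W.numpy(), b.numpy(), loss.numpy()))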

beginner/precision_recall.py (+15 -15)

@@ -40,44 +40,44 @@


def model(input):
-    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
+    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    print("update_ops:{}".format(update_ops))
    with tf.control_dependencies(update_ops):
-        return tf.one_hot(tf.argmax(input, 1), 4, axis=-1)
+        return tf.one_hot(tf.argmax(input=input, axis=1), 4, axis=-1)


-with tf.Session() as sess:
-    input = tf.placeholder(tf.float32, [None, 4])
+with tf.compat.v1.Session() as sess:
+    input = tf.compat.v1.placeholder(tf.float32, [None, 4])
    # print(sess.run(tf.cast(t1, tf.bool)))
    # print(sess.run(tf.argmax(t2, 1)))
-    onehot = tf.one_hot(tf.argmax(t2, 1), 4, axis=-1)
+    onehot = tf.one_hot(tf.argmax(input=t2, axis=1), 4, axis=-1)
    print(sess.run(onehot))
    print(sess.run(tf.cast(onehot, tf.bool)))
    # tf.one_hot(tf.argmax(self.prediction, 1), size, axis = -1),
    # print([3, 3, 3]+t1+t3)

-    r1, _ = tf.metrics.recall(
+    r1, _ = tf.compat.v1.metrics.recall(
        labels=labels,
        predictions=model(input),
        weights=mask1,
-        updates_collections=tf.GraphKeys.UPDATE_OPS)
-    p1, _ = tf.metrics.precision(
+        updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS)
+    p1, _ = tf.compat.v1.metrics.precision(
        labels=labels,
        predictions=model(input),
        weights=mask1,
-        updates_collections=tf.GraphKeys.UPDATE_OPS)
-    r2, _ = tf.metrics.recall(
+        updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS)
+    r2, _ = tf.compat.v1.metrics.recall(
        labels=labels,
        predictions=model(input),
        weights=mask2,
-        updates_collections=tf.GraphKeys.UPDATE_OPS)
-    p2, _ = tf.metrics.precision(
+        updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS)
+    p2, _ = tf.compat.v1.metrics.precision(
        labels=labels,
        predictions=model(input),
        weights=mask2,
-        updates_collections=tf.GraphKeys.UPDATE_OPS)
-    sess.run(tf.global_variables_initializer())
-    sess.run(tf.local_variables_initializer())
+        updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS)
+    sess.run(tf.compat.v1.global_variables_initializer())
+    sess.run(tf.compat.v1.local_variables_initializer())
    # sess.run([r_op, p_op])

    sess.run(model(input), feed_dict={input: logits})
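
Note: the streaming tf.compat.v1.metrics calls with UPDATE_OPS bookkeeping have object-oriented counterparts in TF2. A hedged sketch with made-up labels and predictions (the labels, logits and mask tensors of the original file are not visible in this hunk; per-example masks could be passed as sample_weight in update_state):

import tensorflow as tf

# One-hot labels and predictions for two examples (made up).
labels = tf.constant([[0., 1., 0., 0.],
                      [1., 0., 0., 0.]])
predictions = tf.constant([[0., 1., 0., 0.],
                           [0., 0., 1., 0.]])

# Keras metric objects accumulate state across update_state() calls,
# replacing the metric variables and UPDATE_OPS collections above.
recall = tf.keras.metrics.Recall()
precision = tf.keras.metrics.Precision()
recall.update_state(labels, predictions)
precision.update_state(labels, predictions)

print(recall.result().numpy(), precision.result().numpy())  # 0.5 0.5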

beginner/reduce_mean.py (-13)

@@ -1,13 +0,0 @@
-import tensorflow as tf
-
-i = tf.constant([
-    [1, 0, 1, 0],
-    [0, 1, 0, 1],
-    [1, 0, 0, 1],
-    [0, 1, 1, 0]
-], dtype=tf.float32, name='i')
-
-res = tf.reduce_mean(i)
-
-with tf.Session() as sess:
-    print sess.run(res)
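
Note: another file dropped from the repo instead of upgraded; under TF2 it reduces to a couple of eager lines. A rough sketch:

import tensorflow as tf

i = tf.constant([
    [1, 0, 1, 0],
    [0, 1, 0, 1],
    [1, 0, 0, 1],
    [0, 1, 1, 0]
], dtype=tf.float32, name='i')

# Eager execution: no Session, and Python 3 print() replaces the old print statement.
print(tf.reduce_mean(i).numpy())  # 0.5 -- eight ones out of sixteen entries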

beginner/relu.py (-15)

@@ -1,15 +0,0 @@
-import tensorflow as tf
-
-i = tf.constant([
-    [-1, 3, 1, -0.0001],
-    [2, 1, 0, 1],
-    [1, 2, -4, 1],
-    [-9, 1, 0, 2]
-], dtype=tf.float32, name='i')
-
-b = tf.constant([ 5, 4, 3, 2], dtype=tf.float32)
-
-res = i + b
-
-with tf.Session() as sess:
-    print sess.run(res)
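
Note: same story for this deleted file; the point of interest is that the length-4 bias broadcasts across each row of the 4x4 matrix. A rough TF2 sketch:

import tensorflow as tf

i = tf.constant([
    [-1, 3, 1, -0.0001],
    [2, 1, 0, 1],
    [1, 2, -4, 1],
    [-9, 1, 0, 2]
], dtype=tf.float32, name='i')

# b has shape [4] and is broadcast over every row of the [4, 4] tensor i.
b = tf.constant([5, 4, 3, 2], dtype=tf.float32)
print((i + b).numpy())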

beginner/test.py (+5 -5)

@@ -20,8 +20,8 @@


def length(sequence):
-    used = tf.sign(tf.reduce_max(tf.abs(sequence), 2))
-    length = tf.reduce_sum(used, 1)
+    used = tf.sign(tf.reduce_max(input_tensor=tf.abs(sequence), axis=2))
+    length = tf.reduce_sum(input_tensor=used, axis=1)
    length = tf.cast(length, tf.int32)
    return length


@@ -42,7 +42,7 @@ def f(p1, p2, p3, p4, ph):
    return "{}+{}".format(p1, p2), p2


-with tf.Session() as sess:
+with tf.compat.v1.Session() as sess:
    # l = sess.run(le)
    # print(l)
    # print(ts)

@@ -52,11 +52,11 @@ def f(p1, p2, p3, p4, ph):
    print(st.get_shape())
    print(st.eval())
    print(t.get_shape())
-    px1 = tf.placeholder(tf.string, [None, 3, 1])
+    px1 = tf.compat.v1.placeholder(tf.string, [None, 3, 1])
    x1 = np.array([[['a'], ['b'], ['c']],
                   [['1'], ['2'], ['3']]])
    x2 = np.array([5, 6])
-    ph = tf.placeholder(tf.float32)
+    ph = tf.compat.v1.placeholder(tf.float32)

    # input = tf.Variable([[1.0, 2.0], [3.0, 4.0]])
    elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
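
Note: the upgraded length() helper computes the true (unpadded) length of each sequence in a padded batch: any time step whose features are all zero counts as padding. A hedged eager usage sketch with a made-up batch:

import tensorflow as tf

def length(sequence):
    # 1 for time steps with any non-zero feature, 0 for all-zero padding steps.
    used = tf.sign(tf.reduce_max(input_tensor=tf.abs(sequence), axis=2))
    length = tf.reduce_sum(input_tensor=used, axis=1)
    return tf.cast(length, tf.int32)

batch = tf.constant([[[1.], [2.], [0.]],    # two real steps, one padding step
                     [[3.], [0.], [0.]]])   # one real step, two padding steps
print(length(batch).numpy())  # [2 1]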

beginner/tf2.py (+22)

@@ -0,0 +1,22 @@
+from __future__ import absolute_import, division, print_function, unicode_literals
+import tensorflow as tf
+
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import numpy as np
+import os
+import pandas as pd
+
+## proxy setting
+import os
+os.environ['HTTP_PROXY'] = 'http://localhost:1087'
+os.environ['HTTPS_PROXY'] = 'http://localhost:1087'
+
+mpl.rcParams['figure.figsize'] = (8, 6)
+mpl.rcParams['axes.grid'] = False
+
+zip_path = tf.keras.utils.get_file(
+    origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
+    fname='jena_climate_2009_2016.csv.zip',
+    extract=True)
+csv_path, _ = os.path.splitext(zip_path)
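
Note: the new file stops after resolving csv_path; a natural next step (assumed here, not part of the commit) is to load the extracted Jena climate CSV with pandas:

import os
import pandas as pd
import tensorflow as tf

# Download and extract as in the new file above, then read the CSV.
zip_path = tf.keras.utils.get_file(
    origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
    fname='jena_climate_2009_2016.csv.zip',
    extract=True)
csv_path, _ = os.path.splitext(zip_path)

df = pd.read_csv(csv_path)
print(df.head())   # first few weather observations
print(df.shape)    # rows x columns in the dataset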
