常用函数¶
In [1]:
import os
import tensorflow as tf

# Build a float64 constant tensor, then force-cast it to int32.
t = tf.constant([1, 2, 3], dtype=tf.float64)
print(t)
t = tf.cast(t, dtype=tf.int32)  # tf.cast converts a tensor to the given dtype
print(t)

# Whole-tensor reductions: minimum, maximum, and mean.
print(tf.reduce_min(t))
print(tf.reduce_max(t))
print(tf.reduce_mean(t))
2024-03-29 22:03:02.441627: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. 2024-03-29 22:03:02.444258: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used. 2024-03-29 22:03:02.508584: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used. 2024-03-29 22:03:02.509744: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-03-29 22:03:03.437645: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
tf.Tensor([1. 2. 3.], shape=(3,), dtype=float64) tf.Tensor([1 2 3], shape=(3,), dtype=int32) tf.Tensor(1, shape=(), dtype=int32) tf.Tensor(3, shape=(), dtype=int32) tf.Tensor(2, shape=(), dtype=int32)
In [2]:
# Mark a tensor as trainable.
# tf.Variable(initial_value) -> flags the variable as trainable; flagged
# variables have their gradients recorded during backpropagation.
# Commonly used to declare the trainable parameters of a neural network.
#
# FIX: `mean` and `stddev` are parameters of tf.random.normal (the
# initializer), not of tf.Variable — they belong inside the inner call.
w = tf.Variable(tf.random.normal([2, 2], mean=0, stddev=1))
print(w)
<tf.Variable 'Variable:0' shape=(2, 2) dtype=float32, numpy= array([[-0.9824854 , -0.25114867], [-0.64005363, 0.82449263]], dtype=float32)>
In [3]:
# Element-wise arithmetic: only tensors with matching dimensions can be combined
# tf.add +
# tf.subtract -
# tf.multiply *
# tf.divide /
# Square, integer power, square root
# tf.square
# tf.pow
# tf.sqrt
# Matrix multiplication
# tf.matmul
In [4]:
# Element-wise arithmetic on two (1, 3) tensors.
a = tf.ones([1, 3])
b = tf.fill([1, 3], 3.)
print(a, b)
print(tf.add(a, b))
print(tf.subtract(a, b))
print(tf.multiply(a, b))
print(tf.divide(a, b))

# Square, cube, and square root of a (1, 3) tensor of 4s.
b = tf.fill([1, 3], 4.)
print(tf.square(b))
print(tf.pow(b, 3))
print(tf.sqrt(b))

# Matrix product: (2, 3) @ (3, 2) -> (2, 2).
a = tf.ones([2, 3])
b = tf.fill([3, 2], 2.)
print(tf.matmul(a, b))
tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float32) tf.Tensor([[3. 3. 3.]], shape=(1, 3), dtype=float32) tf.Tensor([[4. 4. 4.]], shape=(1, 3), dtype=float32) tf.Tensor([[-2. -2. -2.]], shape=(1, 3), dtype=float32) tf.Tensor([[3. 3. 3.]], shape=(1, 3), dtype=float32) tf.Tensor([[0.33333334 0.33333334 0.33333334]], shape=(1, 3), dtype=float32) tf.Tensor([[16. 16. 16.]], shape=(1, 3), dtype=float32) tf.Tensor([[64. 64. 64.]], shape=(1, 3), dtype=float32) tf.Tensor([[2. 2. 2.]], shape=(1, 3), dtype=float32)
tf.Tensor( [[6. 6.] [6. 6.]], shape=(2, 2), dtype=float32)
In [5]:
# tf.data.Dataset.from_tensor_slices
# Slices the passed tensors along their first dimension, pairing up
# feature/label rows to build a dataset:
#   data = tf.data.Dataset.from_tensor_slices((features, labels))
features = tf.constant([12, 23, 10, 17])
labels = tf.constant([1, 0, 1, 0])
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
print(dataset)
for pair in dataset:
    print(pair)
<_TensorSliceDataset element_spec=(TensorSpec(shape=(), dtype=tf.int32, name=None), TensorSpec(shape=(), dtype=tf.int32, name=None))>
(<tf.Tensor: shape=(), dtype=int32, numpy=12>, <tf.Tensor: shape=(), dtype=int32, numpy=1>) (<tf.Tensor: shape=(), dtype=int32, numpy=23>, <tf.Tensor: shape=(), dtype=int32, numpy=0>) (<tf.Tensor: shape=(), dtype=int32, numpy=10>, <tf.Tensor: shape=(), dtype=int32, numpy=1>) (<tf.Tensor: shape=(), dtype=int32, numpy=17>, <tf.Tensor: shape=(), dtype=int32, numpy=0>)
In [6]:
# A `with tf.GradientTape()` block records the computations performed inside
# it; tape.gradient(target, sources) then returns d(target)/d(sources):
#   with tf.GradientTape() as tape:
#       ...  # some computations
#   grad = tape.gradient(target, sources)
#
# FIX: renamed the tape variable from `type` (which shadowed the builtin
# `type`) to `tape`, and `grade` to the conventional `grad`.
with tf.GradientTape() as tape:
    w = tf.Variable(tf.constant(3.))
    loss = tf.pow(w, 2)  # loss = w^2, so dloss/dw = 2w = 6
grad = tape.gradient(loss, w)
print(grad)

with tf.GradientTape() as tape:
    w = tf.Variable(tf.constant(1.))
    w1 = tf.Variable(tf.constant(2.))
    # loss = w^2 + 3*w*w1 + w1^2
    loss = tf.pow(w, 2) + 3 * w * w1 + tf.pow(w1, 2)
# Gradients w.r.t. both sources: [2w + 3*w1, 3w + 2*w1] = [8, 7]
grad = tape.gradient(loss, [w, w1])
print(grad)
tf.Tensor(6.0, shape=(), dtype=float32) [<tf.Tensor: shape=(), dtype=float32, numpy=8.0>, <tf.Tensor: shape=(), dtype=float32, numpy=7.0>]
In [7]:
import matplotlib.pyplot as plt
import numpy as np

# Plot y = x^2 together with its derivative y' = 2x over [-10, 10).
x = np.arange(-10, 10, 1)
plt.grid()
plt.plot(x, x * x)
plt.plot(x, 2 * x)
Out[7]:
[<matplotlib.lines.Line2D at 0x7f2c2156bb50>]
In [ ]: