import tensorflow as tf
import numpy as np
# Custom dataset: wraps NumPy arrays as tensors and exposes a tf.data.Dataset
# (directly subclassing tf.data.Dataset does not work; from_tensor_slices is used instead)
class CustomDataset:
    def __init__(self, x_data, y_data):
        self.x_data = tf.convert_to_tensor(x_data, dtype=tf.float32)
        self.y_data = tf.convert_to_tensor(y_data, dtype=tf.float32)

    def as_dataset(self):
        # Yields one (x, y) pair per sample
        return tf.data.Dataset.from_tensor_slices((self.x_data, self.y_data))
# Logistic regression model
class LogisticRegressionModel(tf.keras.Model):
    def __init__(self, input_dim):
        super(LogisticRegressionModel, self).__init__()
        # A single Dense unit with a sigmoid activation implements logistic regression
        self.linear = tf.keras.layers.Dense(1, input_shape=(input_dim,), activation='sigmoid')

    def call(self, x):
        return self.linear(x)
# Create the dataset
x_data = np.array([[1], [2], [3], [4], [5]], dtype=np.float32)
y_data = np.array([[0], [0], [1], [1], [1]], dtype=np.float32)
dataset = CustomDataset(x_data, y_data)
# Create the data pipeline: shuffle before batching; repetition is handled by the epoch loop below
dataloader = dataset.as_dataset().shuffle(buffer_size=100).batch(2)
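# Optional sanity check (illustrative addition, not part of the original example):
# peek at a single batch to confirm the pipeline yields batched (x, y) pairs.
for xb, yb in dataloader.take(1):
    print('batch x shape:', xb.shape, 'batch y shape:', yb.shape)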
# Create the model, loss function, and optimizer
model = LogisticRegressionModel(input_dim=1)
loss_object = tf.keras.losses.BinaryCrossentropy()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
# Train the model
epochs = 100
for epoch in range(epochs):
    for x_batch, y_batch in dataloader:
        with tf.GradientTape() as tape:
            predictions = model(x_batch, training=True)
            loss = loss_object(y_batch, predictions)
        # Compute gradients of the loss w.r.t. the trainable variables and apply them
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{epochs}], Loss: {loss.numpy():.4f}')
# Save the model. A subclassed Keras model cannot be written to a single HDF5 file,
# so the TensorFlow SavedModel directory format is used here (TF 2.x / Keras 2 API).
model.save('logistic_regression_model')
# Load the model back
model = tf.keras.models.load_model('logistic_regression_model')
# Run predictions
x_test = np.array([[6], [7], [8]], dtype=np.float32)
y_pred = model.predict(x_test)
print('Predicted probabilities:', y_pred)
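# Note (illustrative addition): the sigmoid output is a probability, so a common
# convention is to threshold it at 0.5 to obtain hard class labels. The 0.5
# threshold is an assumption, not part of the original example.
y_label = (y_pred >= 0.5).astype(np.int32)
print('Predicted classes:', y_label.ravel())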