时间:2021-05-22
代码
import numpy as np


def initialize_params(dims):
    """Initialize logistic-regression parameters to zero.

    Args:
        dims: number of input features.

    Returns:
        w: (dims, 1) zero weight vector.
        b: scalar bias, initialized to 0.
    """
    w = np.zeros((dims, 1))
    b = 0
    return w, b


def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    z = 1 / (1 + np.exp(-x))
    return z


def logistic(X, y, w, b):
    """One forward/backward pass of binary logistic regression.

    Args:
        X: (num_train, dims) feature matrix.
        y: (num_train, 1) binary labels in {0, 1}.
        w: (dims, 1) weight vector.
        b: scalar bias.

    Returns:
        y_hat: (num_train, 1) predicted probabilities.
        cost: mean binary cross-entropy loss (scalar).
        dw: gradient of the cost w.r.t. w, shape (dims, 1).
        db: gradient of the cost w.r.t. b, scalar.
    """
    num_train = X.shape[0]
    y_hat = sigmoid(np.dot(X, w) + b)
    # Clip probabilities away from 0 and 1 so np.log never sees 0
    # (the raw sigmoid saturates for large |z| and would produce -inf/nan).
    eps = 1e-12
    y_hat_clipped = np.clip(y_hat, eps, 1 - eps)
    # BUGFIX: the original computed this identical cross-entropy expression
    # twice (once as `loss`, once as `cost`) and discarded one copy.
    cost = -np.mean(y * np.log(y_hat_clipped)
                    + (1 - y) * np.log(1 - y_hat_clipped))
    # Gradients use the unclipped y_hat: d(cost)/dz simplifies to y_hat - y.
    dw = np.dot(X.T, (y_hat - y)) / num_train
    db = np.sum(y_hat - y) / num_train
    return y_hat, cost, dw, db


def linear_train(X, y, learning_rate, epochs):
    """Train logistic regression by batch gradient descent.

    (Name kept as `linear_train` for backward compatibility with callers,
    even though this trains a logistic model.)

    Args:
        X: (num_train, dims) training features.
        y: (num_train, 1) binary labels.
        learning_rate: gradient-descent step size.
        epochs: number of full-batch update steps.

    Returns:
        loss_list: per-epoch loss values.
        loss: loss from the final epoch (None if epochs == 0).
        params: {'w': weights, 'b': bias} after training.
        grads: {'dw': ..., 'db': ...} from the final epoch.
    """
    # 参数初始化 / parameter initialization
    w, b = initialize_params(X.shape[1])
    loss_list = []
    # Pre-initialize so epochs == 0 does not raise NameError below.
    loss = None
    dw, db = np.zeros_like(w), 0.0
    for i in range(epochs):
        # Current predictions, loss and gradients.
        y_hat, loss, dw, db = logistic(X, y, w, b)
        loss_list.append(loss)
        # Gradient-descent parameter update.
        w += -learning_rate * dw
        b += -learning_rate * db
        # Periodic progress report.
        if i % 10000 == 0:
            print("epoch %d loss %f" % (i, loss))
    # Trained parameters.
    params = {
        'w': w,
        'b': b
    }
    # Final-epoch gradients.
    grads = {
        'dw': dw,
        'db': db
    }
    return loss_list, loss, params, grads


def predict(X, params):
    """Predict class probabilities for X with trained parameters.

    Args:
        X: (num_samples, dims) feature matrix.
        params: {'w': (dims, 1) weights, 'b': scalar bias}.

    Returns:
        (num_samples, 1) array of probabilities in (0, 1).
    """
    w = params['w']
    b = params['b']
    y_pred = sigmoid(np.dot(X, w) + b)
    return y_pred


if __name__ == "__main__":
    # Demo-only dependency, imported locally so the reusable functions above
    # need only numpy. BUGFIX: `sklearn.datasets.samples_generator` was
    # removed in scikit-learn 0.24 — make_classification now lives in
    # `sklearn.datasets`. (The original also imported matplotlib.pyplot but
    # never used it; that import is dropped.)
    from sklearn.datasets import make_classification

    # Generate a synthetic 2-feature binary classification dataset.
    X, labels = make_classification(n_samples=100, n_features=2,
                                    n_informative=2, n_redundant=0,
                                    random_state=1, n_clusters_per_class=2)
    print(X.shape)
    print(labels.shape)
    # Add reproducible uniform noise.
    rng = np.random.RandomState(2)
    X += 2 * rng.uniform(size=X.shape)
    # 90/10 train/test split.
    offset = int(X.shape[0] * 0.9)
    X_train, y_train = X[:offset], labels[:offset]
    X_test, y_test = X[offset:], labels[offset:]
    y_train = y_train.reshape((-1, 1))
    y_test = y_test.reshape((-1, 1))
    print('X_train=', X_train.shape)
    print('y_train=', y_train.shape)
    print('X_test=', X_test.shape)
    print('y_test=', y_test.shape)
    # Train.
    loss_list, loss, params, grads = linear_train(X_train, y_train,
                                                  0.01, 100000)
    print(params)
    # Predict on the held-out split.
    y_pred = predict(X_test, params)
    print(y_pred[:10])
声明:本页内容来源网络,仅供用户参考;我单位不保证亦不表示资料全面及准确无误,也不保证亦不表示这些资料为最新信息,如因任何原因,本网内容或者用户因倚赖本网内容造成任何损失或损害,我单位将不会负任何法律责任。如涉及版权问题,请提交至online#300.cn邮箱联系删除。
本文实例讲述了Python实现的逻辑回归算法。分享给大家供大家参考,具体如下:使用python实现逻辑回归(Using Python to Implement Logistic Regression)
线性逻辑回归本文用代码实现怎么利用sklearn来进行线性逻辑回归的计算,下面先来看看用到的数据。这是有两行特征的数据,然后第三行是数据的标签。python代码
前面文章分别简单介绍了线性回归,逻辑回归,贝叶斯分类,并且用python简单实现。这篇文章介绍更简单的k-近邻算法(kNN, k-Nearest Neighbors)
本文实例为大家分享了pytorch实现逻辑回归的具体代码,供大家参考,具体内容如下一、pytorch实现逻辑回归逻辑回归是非常经典的分类算法,是用于分类任务,如
本文实例为大家分享了python实现梯度下降和逻辑回归的具体代码,供大家参考,具体内容如下:import numpy as np、import pandas as pd 等