Algorithms for ML

Author: jjx323 | Published 2019-07-21 16:05

Here we provide some code for machine learning which may be useful for later research. The code depends on autograd (https://github.com/HIPS/autograd).
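For readers new to autograd, its core primitive is grad, which takes a Python function built from autograd.numpy operations and returns a new function that computes its gradient. A minimal sketch (not part of the listings below):

import autograd.numpy as np
from autograd import grad

f = lambda x: np.tanh(x)   # any function composed of autograd.numpy operations
df = grad(f)               # df(x) evaluates f'(x) by reverse-mode autodiff
print(df(1.0))             # should agree with 1 - np.tanh(1.0)**2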

Logistic regression
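Logistic regression models \Pr(y = 1 \mid x) = \sigma(w^\top x) with \sigma(z) = \frac{1}{1 + e^{-z}} = \tfrac{1}{2}\big(1 + \tanh(z/2)\big), and fits the weights w by minimizing the negative log-likelihood. The listing below does this with plain gradient descent, letting autograd supply the gradient; the tanh form of the sigmoid is algebraically identical but avoids overflow in e^{-z} for large negative inputs.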

import autograd.numpy as np   # thinly wrapped version of NumPy
from autograd import grad
import matplotlib.pyplot as plt

def sigmoid(x):
    return 0.5*(np.tanh(x/2.0) + 1)

def logistic_predictions(weights, inputs):
    return sigmoid(np.dot(inputs, weights))

def training_loss(weights):
    # Negative log-likelihood of the Bernoulli targets under the model
    preds = logistic_predictions(weights, inputs)
    log_label_probabilities = np.log(preds)*targets + np.log(1 - preds)*(1 - targets)
    return -np.sum(log_label_probabilities)

inputs = np.array([[0.52, 1.12,  0.77],
                   [0.88, -1.08, 0.15],
                   [0.52, 0.06, -1.30],
                   [0.74, -2.49, 1.39]])
    
targets = np.array([True, True, False, False])

training_gradient_fun = grad(training_loss)

weights = np.array([0.0, 0.0, 0.0])
print("Initial loss: ", training_loss(weights))
errors = []
for i in range(1000):
    weights -= training_gradient_fun(weights)*0.01
    # For this simple logistic regression model, the gradient can also be
    # derived by hand and used in place of the autograd one:
    # weights -= 0.01*np.dot(logistic_predictions(weights, inputs) - targets, inputs)
    errors.append(training_loss(weights))
    
print("Trained loss: ", training_loss(weights))
plt.plot(errors)
plt.show()
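As a quick sanity check (not part of the original listing), the autograd gradient can be compared against the hand-derived one from the comment above; the two should agree to machine precision:

auto_g = training_gradient_fun(weights)
manual_g = np.dot(logistic_predictions(weights, inputs) - targets, inputs)
print(np.max(np.abs(auto_g - manual_g)))   # expected to be on the order of 1e-16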

Black box variational inference

The approximating probability measure is chosen to be a diagonal Gaussian, and the target probability measure is a Gaussian mixture with two components.
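In symbols, with variational parameters \phi = (\mu, \log\sigma), the code maximizes a Monte Carlo estimate of the evidence lower bound (ELBO), kept differentiable in \phi via the reparameterization trick:

\mathcal{L}(\phi) = \mathbb{E}_{q_\phi}[\log p(x)] + H[q_\phi] \approx \frac{1}{S}\sum_{s=1}^{S} \log p(x_s) + H[q_\phi], \qquad x_s = \mu + \sigma \odot \epsilon_s,\ \epsilon_s \sim \mathcal{N}(0, I),

where H[q_\phi] = \tfrac{D}{2}(1 + \log 2\pi) + \sum_d \log\sigma_d is the entropy of the diagonal Gaussian (gaussian_entropy in the code); the objective returns the negative of this estimate so that adam can minimize it.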

import matplotlib.pyplot as plt

import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.stats.norm as norm

from autograd import grad
from autograd.misc.optimizers import adam


def black_box_variational_inference(logprob, D, num_samples):
    def unpack_params(params):
        # Variational approximate pdf is a diagonal Gaussian
        mean, log_std = params[:D], params[D:]
        return mean, log_std
    
    def gaussian_entropy(log_std):
        return 0.5*D*(1.0 + np.log(2*np.pi)) + np.sum(log_std)
    
    rs = npr.RandomState(0)
    def variational_objective(params, t):
        """
        Provides a stochastic estimate of the variational lower bound
        """
        mean, log_std = unpack_params(params)
        # Reparameterization trick: x = mean + sigma*eps with eps ~ N(0, I),
        # so samples stay differentiable w.r.t. the variational parameters
        samples = rs.randn(num_samples, D)*np.exp(log_std) + mean
        # ELBO estimate: entropy of q plus a Monte Carlo average of log p
        lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples))
        return -lower_bound
    
    gradient = grad(variational_objective)
    
    return variational_objective, gradient, unpack_params


if __name__ == "__main__":
    
    # Specify an inference problem by its unnormalized log-density
    D = 2
    def log_density(x):
        # Unnormalized log-density of a two-component Gaussian mixture
        mu1 = np.array([-0.7, -0.7])
        mu2 = np.array([0.7, 0.7])
        sig1 = np.array([[1.0, 0.0], [0.0, 0.1]])
        sig2 = np.array([[0.1, 0.0], [0.0, 1.0]])
        mu_density = np.log(mvn.pdf(x, mu1, sig1) + mvn.pdf(x, mu2, sig2))
        return mu_density
    
    # Build variational objective
    objective, gradient, unpack_params = \
            black_box_variational_inference(log_density, D, num_samples=200)
    
    # Set up plotting code
    def plot_isocontours(ax, func, xlimits=[-2, 2], ylimits=[-2, 2], numticks=101):
        x = np.linspace(*xlimits, num=numticks)
        y = np.linspace(*ylimits, num=numticks)
        X, Y = np.meshgrid(x, y)
        zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
        Z = zs.reshape(X.shape)
        ax.contour(X, Y, Z)
        ax.set_yticks([])
        ax.set_xticks([])
        
    # Set up figure
    fig = plt.figure(figsize=(8, 8), facecolor="white")
    ax = fig.add_subplot(111, frameon=False)
    plt.ion()
    plt.show(block=False)        
    
    def callback(params, t, g):
        print("Iteration {} lower bound {}".format(t, -objective(params, t)))
        
        plt.cla()
        target_distribution = lambda x: np.exp(log_density(x))
        plot_isocontours(ax, target_distribution)
        
        mean, log_std = unpack_params(params)
        variational_contour = lambda x: mvn.pdf(x, mean, np.diag(np.exp(2*log_std)))
        plot_isocontours(ax, variational_contour)
        plt.draw()
        plt.pause(1.0/10.0)
        
    print("Optimizing variational parameters...")
    init_mean = -1*np.ones(D)
    init_log_std = -5*np.ones(D)
    init_var_params = np.concatenate([init_mean, init_log_std])
    variational_params = adam(gradient, init_var_params, step_size=0.1, 
                              num_iters=200, callback=callback)
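
After adam returns, the fitted variational parameters can be unpacked into a mean and standard deviations; a minimal follow-up sketch, appended inside the __main__ block above:

    mean, log_std = unpack_params(variational_params)
    print("Fitted mean:", mean)
    print("Fitted std :", np.exp(log_std))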
