Debugging the Knowledge Base Completion Models ConvKB and CapsE

Author: 布口袋_天晴了 | Published 2019-11-11 20:54
Model 1:

GitHub: ConvKB
Paper: A Novel Embedding Model for Knowledge Base Completion Based on Convolutional Neural Network

Model 2:

GitHub: CapsE
Paper: A Capsule Network-based Embedding Model for Knowledge Graph Completion and Search Personalization

Note: The CapsE model was built on ideas inspired by the ConvKB model.
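
The relationship is clearest in the two scoring functions. ConvKB scores a triple (h, r, t) by convolving the stacked embeddings [v_h; v_r; v_t] with a set of filters Ω and projecting the concatenated feature maps onto a weight vector w; CapsE keeps the same convolutional front end but replaces the linear projection with a two-layer capsule network, scoring a triple by the length of the output capsule:

f_ConvKB(h, r, t) = concat(g([v_h; v_r; v_t] ∗ Ω)) · w
f_CapsE(h, r, t) = ‖capsnet(g([v_h; v_r; v_t] ∗ Ω))‖

where g denotes the ReLU nonlinearity and ∗ the convolution operator.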

(Figure: ConvKB architecture; CapsE architecture)

model.py (ConvKB)

import tensorflow as tf
import numpy as np
import math
class ConvKB(object):

    def __init__(self, sequence_length, num_classes, embedding_size, filter_sizes, num_filters, vocab_size,
                 pre_trained=[], l2_reg_lambda=0.001, is_trainable=True, useConstantInit=False):
        # Placeholders for the input triples (h, r, t), the labels, and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")  #shape=(128,3)
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")  #shape=(128,1)
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keep track of the (optional) L2 regularization loss
        l2_loss = tf.constant(0.0)

        # Embedding layer: either randomly initialized or loaded from pre-trained vectors.
        # self.W is the embedding matrix; self.embedded_chars is the matrix looked up from
        # input_x; self.embedded_chars_expanded adds a channel dimension so that 2D
        # convolution can be applied.
        with tf.name_scope("embedding"):
            if len(pre_trained) == 0:
                self.W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -math.sqrt(1.0/embedding_size), math.sqrt(1.0/embedding_size), seed=1234), name="W")
            else:
                self.W = tf.get_variable(name="W2", initializer=pre_trained) #trainable=is_trainable)

            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)  #shape=(128,3,50)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)   #shape=(128,3,50,1)

        # Create a convolution layer for each filter size. Despite the variable name
        # below, ConvKB applies no max-pooling; the ReLU feature maps are collected
        # and concatenated directly.
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                if not useConstantInit:
                    filter_shape = [sequence_length, filter_size, 1, num_filters]  #shape=(3,1,1,500)
                    W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1, seed=1234), name="W")  #shape=(3,1,1,500)
                else:
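                    # Constant initialization: each filter starts as the fixed pattern
                    # [0.1, 0.1, -0.1] over the (h, r, t) rows, which the ConvKB paper
                    # uses so that a filter initially mimics the TransE translation h + r - t.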
                    init1 = tf.constant([[[[0.1]]], [[[0.1]]], [[[-0.1]]]])
                    weight_init = tf.tile(init1, [1, filter_size, 1, num_filters])
                    W = tf.get_variable(name="W3", initializer=weight_init)

                b = tf.Variable(tf.constant(0.0, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")  #shape=(128,1,50,500)
                pooled_outputs.append(h)

        # Concatenate the feature maps from all filter sizes, then flatten them
        self.h_pool = tf.concat(pooled_outputs, 2)  #shape=(128,1,50,500)
        total_dims = (embedding_size * len(filter_sizes) - sum(filter_sizes) + len(filter_sizes)) * num_filters
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, total_dims])  #shape=(128,25000)
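        # For the shapes annotated above (embedding_size=50, filter_sizes=[1],
        # num_filters=500): total_dims = (50*1 - 1 + 1) * 500 = 25000, which matches
        # the flattened shape (128, 25000).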
        
        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob) 

        # Final (unnormalized) scores and sigmoid predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[total_dims, num_classes],
                initializer=tf.contrib.layers.xavier_initializer(seed=1234)) #shape=(25000,1)
            b = tf.Variable(tf.constant(0.0, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.nn.sigmoid(self.scores)  #shape=(128,1)
        # Calculate loss
        with tf.name_scope("loss"):
            losses = tf.nn.softplus(self.scores * self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=500)
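
A minimal training sketch for this class (assuming TensorFlow 1.x and NumPy; the hyperparameters mirror the shape comments above, i.e. batch size 128, embedding size 50, and 500 filters of width 1, while vocab_size and the random batch are only stand-ins for the repo's data loader):

import numpy as np
import tensorflow as tf

model = ConvKB(sequence_length=3, num_classes=1, embedding_size=50,
               filter_sizes=[1], num_filters=500, vocab_size=10000,
               l2_reg_lambda=0.001)
train_op = tf.train.AdamOptimizer(1e-3).minimize(model.loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Stand-in batch: (h, r, t) index triples with labels in {-1, +1}.
    x_batch = np.random.randint(0, 10000, size=(128, 3))
    y_batch = np.random.choice([-1.0, 1.0], size=(128, 1))
    _, loss = sess.run([train_op, model.loss],
                       feed_dict={model.input_x: x_batch,
                                  model.input_y: y_batch,
                                  model.dropout_keep_prob: 0.5})
    print("loss:", loss)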

capsuleNet.py (CapsE)

import tensorflow as tf
from capsuleLayer import CapsLayer
import math

epsilon = 1e-9

class CapsE(object):
    def __init__(self, sequence_length, embedding_size, num_filters, vocab_size, iter_routing, batch_size=256,
                 num_outputs_secondCaps=1, vec_len_secondCaps=10, initialization=[], filter_size=1, useConstantInit=False):
        # Placeholders for the input triples (h, r, t) and the labels
        self.input_x = tf.placeholder(tf.int32, [batch_size, sequence_length], name="input_x")  #shape=(256,3)
        self.input_y = tf.placeholder(tf.float32, [batch_size, 1], name="input_y")  #shape=(256,1)
        self.filter_size = filter_size  #1
        self.num_filters = num_filters  #400
        self.sequence_length = sequence_length  #3
        self.embedding_size = embedding_size  #100
        self.iter_routing = iter_routing  #1
        self.num_outputs_secondCaps = num_outputs_secondCaps  #1
        self.vec_len_secondCaps = vec_len_secondCaps  #10
        self.batch_size = batch_size  #256
        self.useConstantInit = useConstantInit  #false
        # Embedding layer: either randomly initialized or loaded from pre-trained vectors.
        # self.W is the embedding matrix; self.embedded_chars is the matrix looked up from
        # input_x; self.X adds a channel dimension so that 2D convolution can be applied.
        with tf.name_scope("embedding"):
            if len(initialization) == 0:
                self.W = tf.Variable(
                    tf.random_uniform([vocab_size, embedding_size], -math.sqrt(1.0 / embedding_size),
                                      math.sqrt(1.0 / embedding_size), seed=1234), name="W")
            else:
                self.W = tf.get_variable(name="W2", initializer=initialization)  #shape=(40954,100)

        self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)  #shape=(256,3,100)
        self.X = tf.expand_dims(self.embedded_chars, -1)  #shape=(256,3,100,1)

        self.build_arch()  # Build the two capsule layers
        self.loss()
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=500)

        tf.logging.info('Setting up the main structure')

    def build_arch(self):
        # The first capsule layer
        with tf.variable_scope('FirstCaps_layer'):
            self.firstCaps = CapsLayer(num_outputs_secondCaps=self.num_outputs_secondCaps, vec_len_secondCaps=self.vec_len_secondCaps,
                                    with_routing=False, layer_type='CONV', embedding_size=self.embedding_size,
                                    batch_size=self.batch_size, iter_routing=self.iter_routing,
                                    useConstantInit=self.useConstantInit, filter_size=self.filter_size,
                                    num_filters=self.num_filters, sequence_length=self.sequence_length)

            self.caps1 = self.firstCaps(self.X, kernel_size=1, stride=1)  #shape=(256,100,400,1)
        # The second capsule layer
        with tf.variable_scope('SecondCaps_layer'):
            self.secondCaps = CapsLayer(num_outputs_secondCaps=self.num_outputs_secondCaps, vec_len_secondCaps=self.vec_len_secondCaps,
                                    with_routing=True, layer_type='FC',
                                    batch_size=self.batch_size, iter_routing=self.iter_routing,
                                    embedding_size=self.embedding_size, useConstantInit=self.useConstantInit, filter_size=self.filter_size,
                                    num_filters=self.num_filters, sequence_length=self.sequence_length)
            self.caps2 = self.secondCaps(self.caps1)  #shape=(256,1,10,1)

        self.v_length = tf.sqrt(tf.reduce_sum(tf.square(self.caps2), axis=2, keep_dims=True) + epsilon)  #shape=(256,1,1,1)
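        # v_length (the Euclidean norm of the output capsule, with epsilon added for
        # numerical stability) becomes the triple's score in loss() below.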

    def loss(self):
        self.scores = tf.reshape(self.v_length, [self.batch_size, 1])  #shape=(256,1)
        self.predictions = tf.nn.sigmoid(self.scores)
        print("Using square softplus loss")
        losses = tf.square(tf.nn.softplus(self.scores * self.input_y))
        self.total_loss = tf.reduce_mean(losses)
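
A matching sketch for CapsE (again assuming TensorFlow 1.x; CapsLayer must be importable from the repo's capsuleLayer.py, which is not reproduced in this post, and vocab_size=40954 is taken from the shape comment above). Note that the placeholders are built with a fixed batch size of 256:

import numpy as np
import tensorflow as tf

model = CapsE(sequence_length=3, embedding_size=100, num_filters=400,
              vocab_size=40954, iter_routing=1, batch_size=256)
train_op = tf.train.AdamOptimizer(1e-3).minimize(model.total_loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Stand-in batch: (h, r, t) index triples with labels in {-1, +1}.
    x_batch = np.random.randint(0, 40954, size=(256, 3))
    y_batch = np.random.choice([-1.0, 1.0], size=(256, 1))
    _, loss = sess.run([train_op, model.total_loss],
                       feed_dict={model.input_x: x_batch,
                                  model.input_y: y_batch})
    print("total_loss:", loss)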
