TensorFlow class templates for the classic convolutional networks in Lecture 5 of the MOOC

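All of the code below assumes TensorFlow 2.x. The snippets are shown without their import statements; a minimal set that covers every layer used here (the exact imports in the course code may differ) is:

import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, \
    MaxPool2D, Dropout, Flatten, Dense, GlobalAveragePooling2D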

LeNet network

class Lenet5(Model):
    def __init__(self):
        super(Lenet5,self).__init__()
        self.c1 = Conv2D(filters=6,kernel_size=(5,5),activation='sigmoid')    # 6 conv kernels of 5x5
        self.p1 = MaxPool2D(pool_size=(2,2),strides=2)                        # 2x2 max pooling, stride 2

        self.c2 = Conv2D(filters=16,kernel_size=(5,5),activation='sigmoid')   # 16 conv kernels of 5x5
        self.p2 = MaxPool2D(pool_size=(2,2),strides=2)                        # 2x2 max pooling, stride 2

        self.flatten = Flatten()
        self.f1 = Dense(120,activation='sigmoid')   # fully connected, 120 units
        self.f2 = Dense(84,activation='sigmoid')    # fully connected, 84 units
        self.f3 = Dense(10,activation='sigmoid')    # output layer, 10 classes
        
    def call(self,x):
        x = self.c1(x)
        x = self.p1(x)
        x = self.c2(x)
        x = self.p2(x)
        x = self.flatten(x)
        x = self.f1(x)
        x = self.f2(x)
        y = self.f3(x)
        
        return y
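
These classes only define the network; they are trained with the standard Keras compile/fit workflow. A minimal sketch for Lenet5 (assuming CIFAR-10-style 32x32x3 inputs, as in the course; the optimizer, batch size, and epoch count here are illustrative):

model = Lenet5()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0   # scale pixels to [0, 1]
model.fit(x_train, y_train, batch_size=32, epochs=5,
          validation_data=(x_test, y_test))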

AlexNet network

class AlexNet8(Model):
    def __init__(self):
        super(AlexNet8,self).__init__()
        self.c1 = Conv2D(filters=96,kernel_size=(3,3))
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(3,3),strides=2)
        
        self.c2 = Conv2D(filters=256,kernel_size=(3,3))
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(3,3),strides=2)
        
        self.c3 = Conv2D(filters=384,kernel_size=(3,3),padding='same',activation='relu')
        
        self.c4 = Conv2D(filters=384,kernel_size=(3,3),padding='same',activation='relu')
        
        self.c5 = Conv2D(filters=256,kernel_size=(3,3),padding='same',activation='relu')
        self.p5 = MaxPool2D(pool_size=(3,3),strides=2)
        
        self.flatten = Flatten()
        self.f1 = Dense(2048,activation='relu')
        self.d1 = Dropout(0.2)
        
        self.f2 = Dense(2048,activation='relu')
        self.d2 = Dropout(0.2)
        
        self.f3 = Dense(10,activation='softmax')
        
    def call(self,x):
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)
        x = self.p1(x)
        
        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p2(x)
        
        x = self.c3(x)
        x = self.c4(x)
        x = self.c5(x)
        x = self.p5(x)
        
        x = self.flatten(x)
        x = self.f1(x)
        x = self.d1(x)
        x = self.f2(x)
        x = self.d2(x)
        y = self.f3(x)
        
        return y

VGG network

class VGG16(Model):
    def __init__(self):
        super(VGG16, self).__init__()
        self.c1 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')  # conv layer
        self.b1 = BatchNormalization()  # BN layer
        self.a1 = Activation('relu')  # activation layer
        self.c2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d1 = Dropout(0.2)  # dropout layer
        self.c3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b3 = BatchNormalization()
        self.a3 = Activation('relu')
        self.c4 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b4 = BatchNormalization()
        self.a4 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d2 = Dropout(0.2)
        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b5 = BatchNormalization()
        self.a5 = Activation('relu')
        self.c6 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b6 = BatchNormalization()
        self.a6 = Activation('relu')
        self.c7 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b7 = BatchNormalization()
        self.a7 = Activation('relu')
        self.p3 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d3 = Dropout(0.2)
        self.c8 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b8 = BatchNormalization()
        self.a8 = Activation('relu')
        self.c9 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b9 = BatchNormalization()
        self.a9 = Activation('relu')
        self.c10 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b10 = BatchNormalization()
        self.a10 = Activation('relu')
        self.p4 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d4 = Dropout(0.2)
        self.c11 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b11 = BatchNormalization()
        self.a11 = Activation('relu')
        self.c12 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b12 = BatchNormalization()
        self.a12 = Activation('relu')
        self.c13 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b13 = BatchNormalization()
        self.a13 = Activation('relu')
        self.p5 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d5 = Dropout(0.2)
        self.flatten = Flatten()
        self.f1 = Dense(512, activation='relu')
        self.d6 = Dropout(0.2)
        self.f2 = Dense(512, activation='relu')
        self.d7 = Dropout(0.2)
        self.f3 = Dense(10, activation='softmax')
    def call(self, x):
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)
        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p1(x)
        x = self.d1(x)
        x = self.c3(x)
        x = self.b3(x)
        x = self.a3(x)
        x = self.c4(x)
        x = self.b4(x)
        x = self.a4(x)
        x = self.p2(x)
        x = self.d2(x)
        x = self.c5(x)
        x = self.b5(x)
        x = self.a5(x)
        x = self.c6(x)
        x = self.b6(x)
        x = self.a6(x)
        x = self.c7(x)
        x = self.b7(x)
        x = self.a7(x)
        x = self.p3(x)
        x = self.d3(x)
        x = self.c8(x)
        x = self.b8(x)
        x = self.a8(x)
        x = self.c9(x)
        x = self.b9(x)
        x = self.a9(x)
        x = self.c10(x)
        x = self.b10(x)
        x = self.a10(x)
        x = self.p4(x)
        x = self.d4(x)
        x = self.c11(x)
        x = self.b11(x)
        x = self.a11(x)
        x = self.c12(x)
        x = self.b12(x)
        x = self.a12(x)
        x = self.c13(x)
        x = self.b13(x)
        x = self.a13(x)
        x = self.p5(x)
        x = self.d5(x)
        x = self.flatten(x)
        x = self.f1(x)
        x = self.d6(x)
        x = self.f2(x)
        x = self.d7(x)
        y = self.f3(x)
        return y

Inception network

# ConvBNRelu building block
class ConvBNRelu(Model):
    def __init__(self, ch, kernelsz=3, strides=1, padding='same'):
        super(ConvBNRelu, self).__init__()
        self.model = tf.keras.models.Sequential([
            Conv2D(ch, kernelsz, strides=strides, padding=padding),
            BatchNormalization(),
            Activation('relu')
        ])
    def call(self, x):
        # When training=False, BN normalizes with the moving mean and variance accumulated
        # over the training set; when training=True, it uses the mean and variance of the
        # current batch. training=False generally gives better results at inference time.
        x = self.model(x, training=False)
        return x
# InceptionBlk building block
class InceptionBlk(Model):
    def __init__(self, ch, strides=1):
        super(InceptionBlk, self).__init__()
        self.ch = ch
        self.strides = strides
        self.c1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c2_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c2_2 = ConvBNRelu(ch, kernelsz=3, strides=1)
        self.c3_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c3_2 = ConvBNRelu(ch, kernelsz=5, strides=1)
        self.p4_1 = MaxPool2D(3, strides=1, padding='same')
        self.c4_2 = ConvBNRelu(ch, kernelsz=1, strides=strides)
    def call(self, x):
        x1 = self.c1(x)
        x2_1 = self.c2_1(x)
        x2_2 = self.c2_2(x2_1)
        x3_1 = self.c3_1(x)
        x3_2 = self.c3_2(x3_1)
        x4_1 = self.p4_1(x)
        x4_2 = self.c4_2(x4_1)
        # concat along axis=channel
        x = tf.concat([x1, x2_2, x3_2, x4_2], axis=3)
        return x    
# The Inception10 network
class Inception10(Model):
    def __init__(self, num_blocks, num_classes, init_ch=16, **kwargs):
        super(Inception10, self).__init__(**kwargs)
        self.in_channels = init_ch
        self.out_channels = init_ch
        self.num_blocks = num_blocks
        self.init_ch = init_ch
        # first layer
        self.c1 = ConvBNRelu(init_ch)
        
        self.blocks = tf.keras.models.Sequential()
        for block_id in range(num_blocks):
            for layer_id in range(2):
                if layer_id == 0:
                    block = InceptionBlk(self.out_channels, strides=2)
                else:
                    block = InceptionBlk(self.out_channels, strides=1)
                self.blocks.add(block)
            # double out_channels after each block
            self.out_channels *= 2
        self.p1 = GlobalAveragePooling2D()
        self.f1 = Dense(num_classes, activation='softmax')
    def call(self, x):
        x = self.c1(x)
        x = self.blocks(x)
        x = self.p1(x)
        y = self.f1(x)
        return y
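
Each of the num_blocks blocks stacks two InceptionBlk units: the first with strides=2 to halve the feature map, the second with strides=1; out_channels doubles after every block. A minimal instantiation sketch (the block count and the 32x32x3 input shape below are assumptions for illustration):

model = Inception10(num_blocks=2, num_classes=10)
dummy = tf.zeros((1, 32, 32, 3))   # one fake CIFAR-10-sized image to build the weights
_ = model(dummy)
model.summary()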

ResNet network

class ResnetBlock(Model):
    def __init__(self, filters, strides=1, residual_path=False):
        super(ResnetBlock, self).__init__()
        self.filters = filters
        self.strides = strides
        self.residual_path = residual_path
        self.c1 = Conv2D(filters, (3, 3), strides=strides, padding='same', use_bias=False)
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.c2 = Conv2D(filters, (3, 3), strides=1, padding='same', use_bias=False)
        self.b2 = BatchNormalization()
        # When residual_path is True, downsample the input with a 1x1 convolution so that x has
        # the same dimensions as F(x) and the two can be added.
        if residual_path:
            self.down_c1 = Conv2D(filters, (1, 1), strides=strides, padding='same', use_bias=False)
            self.down_b1 = BatchNormalization()
        self.a2 = Activation('relu')
    def call(self, inputs):
        residual = inputs  # residual is the input itself, i.e. residual = x
        # pass the input through conv, BN and activation layers to compute F(x)
        x = self.c1(inputs)
        x = self.b1(x)
        x = self.a1(x)
        x = self.c2(x)
        y = self.b2(x)
        if self.residual_path:
            residual = self.down_c1(inputs)
            residual = self.down_b1(residual)
        out = self.a2(y + residual)  # the output is the sum of the two branches, F(x)+x or F(x)+Wx, then the activation
        return out
class ResNet18(Model):
    def __init__(self, block_list, initial_filters=64):  # block_list: number of residual blocks in each stage
        super(ResNet18, self).__init__()
        self.num_blocks = len(block_list)  # number of stages (blocks)
        self.block_list = block_list
        self.out_filters = initial_filters
        self.c1 = Conv2D(self.out_filters, (3, 3), strides=1, padding='same', use_bias=False)
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.blocks = tf.keras.models.Sequential()
        # build the ResNet structure
        for block_id in range(len(block_list)):  # which block (stage)
            for layer_id in range(block_list[block_id]):  # which residual unit within the block
                if block_id != 0 and layer_id == 0:  # downsample the input of every block except the first
                    block = ResnetBlock(self.out_filters, strides=2, residual_path=True)
                else:
                    block = ResnetBlock(self.out_filters, residual_path=False)
                self.blocks.add(block)  # add the constructed block to the network
            self.out_filters *= 2  # the next block uses twice as many filters as the previous one
        self.p1 = tf.keras.layers.GlobalAveragePooling2D()
        self.f1 = tf.keras.layers.Dense(10, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2())
    def call(self, inputs):
        x = self.c1(inputs)
        x = self.b1(x)
        x = self.a1(x)
        x = self.blocks(x)
        x = self.p1(x)
        y = self.f1(x)
        return y        
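
With block_list=[2, 2, 2, 2] the class stacks eight ResnetBlocks (two per stage, each containing two 3x3 convolutions); together with the initial convolution and the final Dense layer this gives the 18 weight layers the name refers to. A minimal instantiation sketch (the 32x32x3 input shape is an assumption):

model = ResNet18([2, 2, 2, 2])     # two residual blocks per stage
dummy = tf.zeros((1, 32, 32, 3))   # one fake CIFAR-10-sized image to build the weights
_ = model(dummy)
model.summary()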
 