博文

目前显示的是 2025年1月30日 的博文

GoogLeNet框架

Welcome file import torch import torch . nn as nn import torch . nn . functional as F from torch . nn . modules . module import T from torch . utils . hooks import RemovableHandle class InceptionModule ( nn . Module ) : def __init__ ( self , in_channels , ch1x1 , ch3x3red , ch3x3 , ch5x5red , ch5x5 , pool_proj ) : super ( ) . __init__ ( ) # 分支1:1x1卷积 self . branch1 = nn . Conv2d ( in_channels , ch1x1 , kernel_size = 1 ) # 分支2:1x1 -> 3x3卷积 self . branch2 = nn . Sequential ( nn . Conv2d ( in_channels , ch3x3red , kernel_size = 1 ) , nn . Conv2d ( ch3x3red , ch3x3 , kernel_size = 3 , padding = 1 ) ) # 分支3:1x1 -> 5x5 卷积 self . branch3 = nn . Sequential ( nn . Conv2d ( in_channels , ch5x5red , kernel_size = 1 ) , nn . Conv2d ( ch5x5red , ch5x5 , kernel_size = 5 , padding = 2 ) ) # 分支4:3x3 池化 -> 1x1 卷积 self . branch4 = nn . Sequential ( nn . MaxPool2d ( kernel_size = 3 , stride = 1...

VGG框架

Welcome file import torch import torch . nn as nn from DeepLearning . CNN . AlexNet import running_loss , correct from numexpr import is_cpu_amd_intel class VGG16 ( nn . Module ) : def __init__ ( self , num_classes = 1000 ) : super ( VGG16 , self ) . __init__ ( ) self . features = nn . Sequential ( # Block1 nn.Conv2d(3, 64, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # Block2 nn.Conv2d(64, 128, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # Block3 nn.Conv2d(128, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=2, stride=2), # Block4 nn.Co...

AlexNet框架

Welcome file import torch import torch . nn as nn class AlexNet ( nn . Module ) : def __init__ ( self , num_classes = 1000 ) : super ( AlexNet , self ) . __init__ ( ) self . features = nn . Sequential ( nn . Conv2d ( 3 , 96 , kernel_size = 11 , stride = 4 , padding = 2 ) , # 输入通道3(RGB) nn . ReLU ( inplace = True ) , nn . MaxPool2d ( kernel_size = 3 , stride = 2 ) , nn . Conv2d ( 96 , 256 , kernel_size = 5 , padding = 2 ) , nn . ReLU ( inplace = True ) , nn . MaxPool2d ( kernel_size = 3 , stride = 2 ) , nn . Conv2d ( 256 , 384 , kernel_size = 3 , padding = 1 ) , nn . ReLU ( inplace = True ) , nn . Conv2d ( 384 , 384 , kernel_size = 3 , padding = 1 ) , nn . ReLU ( inplace = True ) , nn . Conv2d ( 384 , 256 , kernel_size = 3 , padding = 1 ) , nn . ReLU ( inplace = True ) , nn . MaxPool2d ( kernel_size = 3 , stride = 2 ) , ) self . classifier = nn . Sequential ( nn . Dropout ( 0.5 ) , # 原始论文使用Dropout nn . ...

LeNet-5 框架

Welcome file LeNet-5框架代码 import torch import torch . nn as nn class LeNet5 ( nn . Module ) : def __init__ ( self ) : super ( LeNet5 , self ) . __init__ ( ) self . conv1 = nn . Conv2d ( 1 , 6 , kernel_size = 5 , padding = 2 ) # 输入通道1,输出通道6 self . pool1 = nn . AvgPool2d ( kernel_size = 2 , stride = 2 ) # 平均池化 self . conv2 = nn . Conv2d ( 6 , 16 , kernel_size = 5 ) self . pool2 = nn . AvgPool2d ( kernel_size = 2 , stride = 2 ) self . fc1 = nn . Linear ( 16 * 5 * 5 , 120 ) self . fc2 = nn . Linear ( 120 , 84 ) self . fc3 = nn . Linear ( 84 , 10 ) def forward ( self , x ) : x = torch . tanh ( self . conv1 ( x ) ) # 原始论文使用 tanh 激活 x = self . pool1 ( x ) x = torch . tanh ( self . conv2 ( x ) ) x = self . pool2 ( x ) x = x . view ( - 1 , 16 * 5 * 5 ) # 展平 x = torch . tanh ( self . fc1 ( x ) ) x = torch . tanh ( self . fc2 ( x ) ) x = self . fc3 ( x ) return x MNIST分类完整完整项...