diff --git a/Jianhai/lab5/.ipynb_checkpoints/compare-checkpoint.py b/Jianhai/lab5/.ipynb_checkpoints/compare-checkpoint.py new file mode 100644 index 0000000..e69de29 diff --git a/Jianhai/lab5/.ipynb_checkpoints/train-checkpoint.ipynb b/Jianhai/lab5/.ipynb_checkpoints/train-checkpoint.ipynb new file mode 100644 index 0000000..0db8c01 --- /dev/null +++ b/Jianhai/lab5/.ipynb_checkpoints/train-checkpoint.ipynb @@ -0,0 +1,213 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "aa1c822b", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "import os\n", + "\n", + "# 导入项目中的模块\n", + "from models import SimpleMLP, DeepMLP, ResidualMLP, SimpleCNN, MediumCNN, VGGStyleNet, SimpleResNet\n", + "from utils import (\n", + " load_cifar10, \n", + " set_seed, \n", + " train_model, \n", + " evaluate_model, \n", + " plot_training_history,\n", + " visualize_model_predictions,\n", + " visualize_conv_filters,\n", + " model_complexity\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "dd3b8edc", + "metadata": { + "inputHidden": false + }, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "module 'os' has no attribute 'expanduser'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipykernel_246/1765368111.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0mset_seed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 14\u001b[0;31m 
\u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexpanduser\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"~/work/Jianhai/lab5\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;31m# 检查是否有可用的GPU\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mAttributeError\u001b[0m: module 'os' has no attribute 'expanduser'" + ] + } + ], + "source": [ + "# 设置参数\n", + "model_type = 'simple_mlp' # 可选: 'simple_mlp', 'deep_mlp', 'residual_mlp', 'simple_cnn', 'medium_cnn', 'vgg_style', 'resnet'\n", + "epochs = 20\n", + "learning_rate = 0.001\n", + "batch_size = 128\n", + "use_data_augmentation = True # CNN通常受益于数据增强\n", + "save_directory = './ck'\n", + "visualize_filters = True # 是否可视化卷积核(仅对CNN有效)\n", + "visualize_predictions = True # 是否可视化预测结果\n", + "\n", + "# 设置随机种子\n", + "set_seed()\n", + "\n", + "os.chdir(os.path.expanduser(\"~/work/Jianhai/lab5\"))\n", + "\n", + "# 检查是否有可用的GPU\n", + "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n", + "print(f\"使用设备: {device}\")\n", + "\n", + "# 加载数据\n", + "train_loader, valid_loader, test_loader, classes = load_cifar10(\n", + " use_augmentation=use_data_augmentation, \n", + " batch_size=batch_size\n", + ")\n", + "\n", + "# 初始化选择的模型\n", + "if model_type == 'simple_mlp':\n", + " model = SimpleMLP()\n", + " model_name = \"SimpleMLP\"\n", + "elif model_type == 'deep_mlp':\n", + " model = DeepMLP(dropout_rate=0.5, use_bn=True, use_dropout=True)\n", + " model_name = \"DeepMLP\"\n", + "elif model_type == 'residual_mlp':\n", + " model = ResidualMLP(activation='relu')\n", + " model_name = \"ResidualMLP\"\n", + "elif model_type == 'simple_cnn':\n", + " model = SimpleCNN()\n", + " model_name = \"SimpleCNN\"\n", + "elif model_type == 
'medium_cnn':\n", + " model = MediumCNN(use_bn=True)\n", + " model_name = \"MediumCNN\"\n", + "elif model_type == 'vgg_style':\n", + " model = VGGStyleNet()\n", + " model_name = \"VGGStyleNet\"\n", + "else: # resnet\n", + " model = SimpleResNet(num_blocks=[2, 2, 2])\n", + " model_name = \"SimpleResNet\"\n", + "\n", + "print(f\"使用模型: {model_name}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1a5322fe", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "分析模型复杂度:\n", + "参数量: 1,578,506\n", + "每批次(128个样本)推理时间: 8.18ms\n", + "Epoch 1/20\n", + "模型已保存到 ./ck/SimpleMLP_best.pth\n", + "训练损失: 1.8831, 训练准确率: 0.3418\n", + "验证损失: 1.7475, 验证准确率: 0.3796\n", + "本轮用时: 48.95s\n", + "--------------------------------------------------\n", + "Epoch 2/20\n" + ] + } + ], + "source": [ + "# 计算模型复杂度\n", + "print(\"\\n分析模型复杂度:\")\n", + "model_complexity(model, device=device)\n", + "\n", + "# 定义损失函数和优化器\n", + "criterion = nn.CrossEntropyLoss()\n", + "optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n", + "\n", + "# 可以添加学习率调度器\n", + "scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)\n", + "\n", + "# 确保checkpoints目录存在\n", + "os.makedirs(save_directory, exist_ok=True)\n", + "\n", + "# 训练模型\n", + "trained_model, history = train_model(\n", + " model, train_loader, valid_loader, criterion, optimizer, scheduler,\n", + " num_epochs=epochs, device=device, save_dir=save_directory\n", + ")\n", + "\n", + "# 绘制训练历史\n", + "plot_training_history(history, title=f\"{model_name} Training History\")\n", + "\n", + "# 在测试集上评估模型\n", + "print(\"\\n在测试集上评估模型:\")\n", + "test_loss, test_acc = evaluate_model(trained_model, test_loader, criterion, device, classes)\n", + "\n", + "print(f\"{model_name} 最终测试准确率: {test_acc:.4f}\")\n", + "\n", + "# 如果是CNN模型并且需要可视化卷积核\n", + "if visualize_filters and model_type in ['simple_cnn', 'medium_cnn', 'vgg_style', 'resnet']:\n", + " print(\"\\n可视化卷积核:\")\n", + " if 
model_type == 'simple_cnn':\n", + " visualize_conv_filters(trained_model, 'conv1')\n", + " elif model_type == 'medium_cnn':\n", + " visualize_conv_filters(trained_model, 'conv1')\n", + " elif model_type == 'vgg_style':\n", + " visualize_conv_filters(trained_model, 'features.0')\n", + " else: # resnet\n", + " visualize_conv_filters(trained_model, 'conv1')\n", + "\n", + "# 如果需要可视化模型预测\n", + "if visualize_predictions:\n", + " print(\"\\n可视化模型预测:\")\n", + " visualize_model_predictions(trained_model, test_loader, classes, device)\n", + "\n", + "print(f\"\\n{model_name}的训练和评估已完成!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3eaec7b4", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6701954f", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/Jianhai/lab5/.ipynb_checkpoints/实验指导-checkpoint.md b/Jianhai/lab5/.ipynb_checkpoints/实验指导-checkpoint.md new file mode 100644 index 0000000..ece27f7 --- /dev/null +++ b/Jianhai/lab5/.ipynb_checkpoints/实验指导-checkpoint.md @@ -0,0 +1,301 @@ +# 深度学习模型实验指导:MLP与CNN模型对比分析 + +## 实验概述 + +本实验旨在通过对多层感知机(MLP)和卷积神经网络(CNN)的实现、训练和评估,帮助学生深入理解两种模型的结构特点、性能差异以及适用场景。学生将从基础模型开始,逐步探索更复杂的网络架构,最终通过对比分析,掌握深度学习模型设计与评估的关键技能。 + +## 实验目的 + +1. 掌握MLP和CNN的基本原理和实现方法 +2. 了解不同网络结构对模型性能的影响 +3. 学习深度学习模型训练、评估和可视化的方法 +4. 通过对比实验,理解不同模型在图像分类任务中的优缺点 +5. 
培养深度学习模型调优和问题解决的能力 + +## 实验准备 + +### 环境要求 + +- Python 3.6+ +- PyTorch 1.7+ +- NumPy, Matplotlib +- scikit-learn (用于评估) +- 建议使用GPU环境(可选) + +实验环境已经在mo平台中搭建好了,同学们无需自行配置 + +### 实验数据集 + +本实验使用CIFAR-10数据集,包含10个类别的彩色图像,每类6000张,共60000张32×32的图像。 + +### 项目结构 + +``` +项目根目录/ +├── models/ +│ ├── __init__.py +│ ├── mlp.py # MLP模型定义 +│ └── cnn.py # CNN模型定义 +├── utils/ +│ ├── __init__.py +│ ├── data_loader.py # 数据加载函数 +│ └── train_utils.py # 训练和评估函数 +├── train_all_notebook.py # 统一训练脚本 +└── compare_models.py # 模型比较脚本 +``` + +## 实验原理 + +### 多层感知机(MLP) + +多层感知机是一种前馈神经网络,由输入层、一个或多个隐藏层和输出层组成。MLP的主要特点是: + +1. 每层神经元与下一层全连接 +2. 使用非线性激活函数(如ReLU、Sigmoid等) +3. 通过反向传播算法进行训练 + +**思考问题1**: MLP在处理图像数据时面临哪些挑战?请从数据结构、参数量和特征提取能力三个角度分析。 + + +### 卷积神经网络(CNN) + +卷积神经网络是为处理具有网格状拓扑结构的数据而设计的神经网络,主要包含卷积层、池化层和全连接层。CNN的主要特点是: + +1. 局部连接:每个神经元只与输入数据的一个局部区域连接 +2. 权重共享:同一特征图的所有神经元共享相同的权重 +3. 多层次特征提取:低层检测边缘等简单特征,高层组合这些特征形成更复杂的表示 + +**思考问题2**: CNN相比MLP在处理图像时具有哪些优势?解释卷积操作如何保留图像的空间信息。 + + +## 实验内容 + +### 第一部分:基础MLP模型 + +#### 1.1 了解MLP模型结构 + +查看`models/mlp.py`文件,理解三种MLP模型的结构: +- `SimpleMLP`: 单隐层MLP +- `DeepMLP`: 多隐层MLP,带有BatchNorm和Dropout +- `ResidualMLP`: 带有残差连接的MLP + +**任务1**: 在下面的代码块中,实现一个具有两个隐藏层的MLP模型。第一隐藏层有128个神经元,第二隐藏层有64个神经元,输出层对应10个类别。使用ReLU激活函数,并添加BatchNorm和Dropout(0.3)。 + +```python +import torch.nn as nn + +class TwoLayerMLP(nn.Module): + def __init__(self, input_dim=3*32*32): + super(TwoLayerMLP, self).__init__() + self.flatten = nn.Flatten() + # 使用nn.Linear, nn.BatchNorm1d, nn.ReLU和nn.Dropout实现两个隐藏层 + + def forward(self, x): + x = self.flatten(x) + # 实现前向传播 + return x +``` + +#### 1.2 训练和评估MLP模型 + +1. 在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'simple_mlp'`。 + +2. 观察训练过程中的损失和准确率变化,以及最终在测试集上的性能。 + + **分析问题1**: 训练过程中,损失和准确率曲线表现如何?是否出现过拟合或欠拟合?简要分析可能的原因。 + + +3. 修改参数尝试训练DeepMLP模型,将`model_type`设置为`'deep_mlp'`。 + + **分析问题2**: 对比SimpleMLP和DeepMLP的性能,增加网络深度对性能有何影响? 
+ + +### 第二部分:基础CNN模型 + +#### 2.1 了解CNN模型结构 + +查看`models/cnn.py`文件,理解不同CNN模型的结构: +- `SimpleCNN`: 简单的CNN,包含两个卷积层 +- `MediumCNN`: 中等复杂度的CNN,带有BatchNorm和Dropout +- `VGGStyleNet`: VGG风格的CNN,使用连续的3x3卷积 +- `SimpleResNet`: 简化的ResNet,包含残差连接 + +**任务2**: 修改下面的`SimpleCNN`代码,添加一个额外的卷积层和BatchNorm。新的卷积层应该在第二个池化层之后,卷积核数量为64,卷积核大小为3x3。 + +```python +class EnhancedCNN(nn.Module): + def __init__(self): + super(EnhancedCNN, self).__init__() + self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1) + self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1) + # 在这里添加一个新的卷积层、BatchNorm和相应的池化层 + self.pool = nn.MaxPool2d(2, 2) + self.flatten = nn.Flatten() + # 修改全连接层以适应新的特征图尺寸 + self.relu = nn.ReLU() + def forward(self, x): + # 实现包含新卷积层的前向传播 + return x +``` + +#### 2.2 训练和评估CNN模型 + +1. 在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'simple_cnn'`,并将`use_data_augmentation`设置为`True`。 + +2. 观察训练过程和卷积核可视化结果。 + + **分析问题3**: 卷积核可视化显示了什么模式?这些模式与图像中的哪些特征可能对应? + + +3. 继续训练MediumCNN模型,将`model_type`设置为`'medium_cnn'`。 + + **分析问题4**: CNN模型相比MLP在CIFAR-10上的性能有何不同?为什么会有这样的差异? + + + +### 第三部分:高级CNN架构探索 + +#### 3.1 VGG风格和ResNet风格网络架构 + +在本部分中,我们将探索两种影响深远的CNN架构:VGG和ResNet。通过理解这些经典架构的设计理念和特点,可以帮助我们设计更高效的神经网络。 + +##### 3.1.1 VGG架构特点 +VGG网络(由Visual Geometry Group开发)是一种非常简洁而有效的CNN架构,在2014年ImageNet挑战赛中取得了优异成绩。其主要特点包括: + +1. **简单统一的设计**:使用小尺寸(3×3)卷积核和2×2最大池化层 +2. **深度堆叠**:通过堆叠多个相同配置的卷积层增加网络深度 +3. **结构规整**:遵循"卷积层组-池化层"的模式,随着网络深入,特征图尺寸减小而通道数增加 + +在我们的实现中,`VGGStyleNet`采用了简化版的VGG设计理念,包含三个卷积块,每个块包含两个卷积层和一个池化层。 + +1. 在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'vgg_style'`,并将`use_data_augmentation`设置为`True`。 + +2. 观察网络的训练过程和性能。特别注意其收敛速度和最终准确率。 + +##### 3.1.2 ResNet架构及残差连接 + +ResNet(残差网络)由微软研究院的He等人在2015年提出,是解决"深度退化问题"的突破性架构。其核心创新是引入了残差连接(skip connection): + +1. **残差连接**:通过快捷连接(shortcut connection)将输入直接加到输出上,形成恒等映射路径 +2. **残差学习**:网络不再直接学习输入到输出的映射F(x),而是学习残差F(x)-x +3. **深度扩展**:残差连接有效缓解了梯度消失问题,使得训练非常深的网络成为可能 + +在我们的实现中,`SimpleResNet`使用了基本的残差块,每个残差块包含两个3×3的卷积层和一个跳跃连接。 + +1. 
在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'resnet'`,并将`use_data_augmentation`设置为`True`。 + +2. 观察网络的训练过程和性能,特别是深度对训练稳定性的影响。 + +##### 3.1.3 Bottleneck结构 + +在更深的ResNet变体中,常使用"瓶颈"(Bottleneck)结构来降低计算复杂度: + +- 使用1×1卷积降低通道数(降维) +- 使用3×3卷积进行特征提取 +- 再使用1×1卷积恢复通道数(升维) + +这种设计大幅减少参数量和计算量,同时保持或提高性能。 + +**思考问题3**: 分析Bottleneck结构的优势。为什么1×1卷积在深度CNN中如此重要?它如何帮助控制网络的参数量和计算复杂度? + + +**探索问题1**: 查看`models/cnn.py`中的`SimpleResNet`实现,分析残差连接是如何实现的。如果输入和输出通道数不匹配,代码是如何处理的? + + + +#### 3.2 模型复杂度分析 + +不同CNN架构在性能和效率之间存在权衡。现在我们将通过分析不同模型的参数量和推理时间来理解这种权衡。 + +1. 运行以下代码来分析各个模型的复杂度: + ```python + from models import SimpleMLP, DeepMLP, ResidualMLP, SimpleCNN, MediumCNN, VGGStyleNet, SimpleResNet + from utils import model_complexity + import torch + + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + + models = { + 'SimpleMLP': SimpleMLP(), + 'DeepMLP': DeepMLP(), + 'SimpleCNN': SimpleCNN(), + 'MediumCNN': MediumCNN(), + 'VGGStyleNet': VGGStyleNet(), + 'SimpleResNet': SimpleResNet() + } + + results = {} + for name, model in models.items(): + print(f"\n分析{name}复杂度:") + params, time = model_complexity(model, device=device) + results[name] = {'params': params, 'time': time} + ``` + +2. 记录并比较各个模型的参数量和推理时间。 + +**分析问题5**: VGG风格和ResNet风格网络的性能比较。残差连接带来了哪些优势? + +**分析问题6**: 参数量和推理时间如何影响模型的实用性?如何在性能和效率之间找到平衡? + + +#### 3.3 理解高级CNN设计理念 + +随着深度学习的发展,CNN架构设计也变得更加精细和高效。以下是一些重要的设计理念: + +1. **网络深度与宽度平衡**:更深的网络能学习更抽象的特征,但也更难训练;更宽的网络(更多通道)能捕获更多特征,但参数量增加 +2. **跳跃连接**:除了ResNet的残差连接,还有DenseNet的密集连接、U-Net的跨层连接等 +3. **特征增强**:注意力机制(如SENet的通道注意力)、特征融合等 +4. **高效卷积设计**:深度可分离卷积(MobileNet)、组卷积(ShuffleNet)等 + +**探索问题2**: 如果你要为移动设备设计一个CNN模型,应该考虑哪些因素来权衡性能和效率?请提出至少三条具体的设计原则。 + + +### 第四部分:模型比较与分析 + +运行 `compare.ipynb` 来对比不同模型的性能: + +**综合分析**: 根据比较结果,分析不同类型模型(MLP和CNN)以及不同复杂度模型的性能差异。考虑以下几点: +1. 测试准确率 +2. 参数量 +3. 推理时间 +4. 训练收敛速度 +5. 过拟合/欠拟合情况 + + +## 创新探索任务(选做) + +选择下列一项或多项任务完成: + +1. **模型改进**:对任一模型进行修改和改进,提高其在CIFAR-10上的性能。 +2. **可视化分析**:设计更好的可视化方法来解释模型的决策过程。 +3. **迁移学习**:探索如何利用预训练模型提高CIFAR-10的分类性能。 +4. 
**对抗性样本**:生成对抗性样本,并研究不同模型对对抗性样本的鲁棒性。 +5. **自监督学习**:实现一个简单的自监督学习方法,并评估其效果。 + +## 实验报告要求 + +实验报告应包含以下内容: + +1. 实验目的和背景介绍 +2. 实验原理简述 +3. 实验过程描述 +4. 实现的代码(关键部分,包含详细注释) +5. 实验结果和分析(包括填写的所有分析问题和任务) +6. 创新探索任务的设计、实现和结果(如果选做) +7. 结论和思考 +8. 参考文献 + +## 评分标准 + +- 基础任务完成度:60% +- 分析问题深度和准确性:35% +- 创新探索任务:15% (bonus) +- 报告质量和表达清晰度:5% + +## 参考资料 + +1. LeCun, Y., Bengio, Y., & Hinton, G. (2015). Deep learning. Nature, 521(7553), 436-444. +2. He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. CVPR. +3. Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556. +4. PyTorch文档:https://pytorch.org/docs/stable/index.html +5. CS231n: Convolutional Neural Networks for Visual Recognition:https://cs231n.github.io/ \ No newline at end of file diff --git a/Jianhai/lab5/Experiment Guide.md b/Jianhai/lab5/Experiment Guide.md deleted file mode 100644 index 1f33893..0000000 --- a/Jianhai/lab5/Experiment Guide.md +++ /dev/null @@ -1,305 +0,0 @@ -# 深度学习模型实验指导:MLP与CNN模型对比分析 - -## 实验概述 - -本实验旨在通过对多层感知机(MLP)和卷积神经网络(CNN)的实现、训练和评估,帮助学生深入理解两种模型的结构特点、性能差异以及适用场景。学生将从基础模型开始,逐步探索更复杂的网络架构,最终通过对比分析,掌握深度学习模型设计与评估的关键技能。 - -本实验的代码已经可以稳定运行。作业内容包括补全两个模型定义代码(MLP与CNN)以及回答一系列问题。两个补全任务的代码仅需在实验报告中体现即可。 - - - -## 实验目的 - -1. 掌握MLP和CNN的基本原理和实现方法 -2. 了解不同网络结构对模型性能的影响 -3. 学习深度学习模型训练、评估和可视化的方法 -4. 通过对比实验,理解不同模型在图像分类任务中的优缺点 -5. 
培养深度学习模型调优和问题解决的能力 - -## 实验准备 - -### 环境要求 - -- Python 3.6+ -- PyTorch 1.7+ -- NumPy, Matplotlib -- scikit-learn (用于评估) -- 建议使用GPU环境(可选) - -实验环境已经在mo平台中搭建好了,同学们无需自行配置 - -### 实验数据集 - -本实验使用CIFAR-10数据集,包含10个类别的彩色图像,每类6000张,共60000张32×32的图像。 - -### 项目结构 - -``` -项目根目录/ -├── models/ -│ ├── __init__.py -│ ├── mlp.py # MLP模型定义 -│ └── cnn.py # CNN模型定义 -├── utils/ -│ ├── __init__.py -│ ├── data_loader.py # 数据加载函数 -│ └── train_utils.py # 训练和评估函数 -├── train_all_notebook.py # 统一训练脚本 -└── compare_models.py # 模型比较脚本 -``` - -## 实验原理 - -### 多层感知机(MLP) - -多层感知机是一种前馈神经网络,由输入层、一个或多个隐藏层和输出层组成。MLP的主要特点是: - -1. 每层神经元与下一层全连接 -2. 使用非线性激活函数(如ReLU、Sigmoid等) -3. 通过反向传播算法进行训练 - -**思考问题1**: MLP在处理图像数据时面临哪些挑战?请从数据结构、参数量和特征提取能力三个角度分析。 - - -### 卷积神经网络(CNN) - -卷积神经网络是为处理具有网格状拓扑结构的数据而设计的神经网络,主要包含卷积层、池化层和全连接层。CNN的主要特点是: - -1. 局部连接:每个神经元只与输入数据的一个局部区域连接 -2. 权重共享:同一特征图的所有神经元共享相同的权重 -3. 多层次特征提取:低层检测边缘等简单特征,高层组合这些特征形成更复杂的表示 - -**思考问题2**: CNN相比MLP在处理图像时具有哪些优势?解释卷积操作如何保留图像的空间信息。 - - -## 实验内容 - -### 第一部分:基础MLP模型 - -#### 1.1 了解MLP模型结构 - -查看`models/mlp.py`文件,理解三种MLP模型的结构: -- `SimpleMLP`: 单隐层MLP -- `DeepMLP`: 多隐层MLP,带有BatchNorm和Dropout -- `ResidualMLP`: 带有残差连接的MLP - -**任务1**: 在下面的代码块中,实现一个具有两个隐藏层的MLP模型。第一隐藏层有128个神经元,第二隐藏层有64个神经元,输出层对应10个类别。使用ReLU激活函数,并添加BatchNorm和Dropout(0.3)。 - -```python -import torch.nn as nn - -class TwoLayerMLP(nn.Module): - def __init__(self, input_dim=3*32*32): - super(TwoLayerMLP, self).__init__() - self.flatten = nn.Flatten() - # 使用nn.Linear, nn.BatchNorm1d, nn.ReLU和nn.Dropout实现两个隐藏层 - - def forward(self, x): - x = self.flatten(x) - # 实现前向传播 - return x -``` - -#### 1.2 训练和评估MLP模型 - -1. 在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'simple_mlp'`。 - -2. 观察训练过程中的损失和准确率变化,以及最终在测试集上的性能。 - - **分析问题1**: 训练过程中,损失和准确率曲线表现如何?是否出现过拟合或欠拟合?简要分析可能的原因。 - - -3. 修改参数尝试训练DeepMLP模型,将`model_type`设置为`'deep_mlp'`。 - - **分析问题2**: 对比SimpleMLP和DeepMLP的性能,增加网络深度对性能有何影响? 
- - -### 第二部分:基础CNN模型 - -#### 2.1 了解CNN模型结构 - -查看`models/cnn.py`文件,理解不同CNN模型的结构: -- `SimpleCNN`: 简单的CNN,包含两个卷积层 -- `MediumCNN`: 中等复杂度的CNN,带有BatchNorm和Dropout -- `VGGStyleNet`: VGG风格的CNN,使用连续的3x3卷积 -- `SimpleResNet`: 简化的ResNet,包含残差连接 - -**任务2**: 修改下面的`SimpleCNN`代码,添加一个额外的卷积层和BatchNorm。新的卷积层应该在第二个池化层之后,卷积核数量为64,卷积核大小为3x3。 - -```python -class EnhancedCNN(nn.Module): - def __init__(self): - super(EnhancedCNN, self).__init__() - self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1) - self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1) - # 在这里添加一个新的卷积层、BatchNorm和相应的池化层 - self.pool = nn.MaxPool2d(2, 2) - self.flatten = nn.Flatten() - # 修改全连接层以适应新的特征图尺寸 - self.relu = nn.ReLU() - def forward(self, x): - # 实现包含新卷积层的前向传播 - return x -``` - -#### 2.2 训练和评估CNN模型 - -1. 在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'simple_cnn'`,并将`use_data_augmentation`设置为`True`。 - -2. 观察训练过程和卷积核可视化结果。 - - **分析问题3**: 卷积核可视化显示了什么模式?这些模式与图像中的哪些特征可能对应? - - -3. 继续训练MediumCNN模型,将`model_type`设置为`'medium_cnn'`。 - - **分析问题4**: CNN模型相比MLP在CIFAR-10上的性能有何不同?为什么会有这样的差异? - - - -### 第三部分:高级CNN架构探索 - -#### 3.1 VGG风格和ResNet风格网络架构 - -在本部分中,我们将探索两种影响深远的CNN架构:VGG和ResNet。通过理解这些经典架构的设计理念和特点,可以帮助我们设计更高效的神经网络。 - -##### 3.1.1 VGG架构特点 -VGG网络(由Visual Geometry Group开发)是一种非常简洁而有效的CNN架构,在2014年ImageNet挑战赛中取得了优异成绩。其主要特点包括: - -1. **简单统一的设计**:使用小尺寸(3×3)卷积核和2×2最大池化层 -2. **深度堆叠**:通过堆叠多个相同配置的卷积层增加网络深度 -3. **结构规整**:遵循"卷积层组-池化层"的模式,随着网络深入,特征图尺寸减小而通道数增加 - -在我们的实现中,`VGGStyleNet`采用了简化版的VGG设计理念,包含三个卷积块,每个块包含两个卷积层和一个池化层。 - -1. 在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'vgg_style'`,并将`use_data_augmentation`设置为`True`。 - -2. 观察网络的训练过程和性能。特别注意其收敛速度和最终准确率。 - -##### 3.1.2 ResNet架构及残差连接 - -ResNet(残差网络)由微软研究院的He等人在2015年提出,是解决"深度退化问题"的突破性架构。其核心创新是引入了残差连接(skip connection): - -1. **残差连接**:通过快捷连接(shortcut connection)将输入直接加到输出上,形成恒等映射路径 -2. **残差学习**:网络不再直接学习输入到输出的映射F(x),而是学习残差F(x)-x -3. **深度扩展**:残差连接有效缓解了梯度消失问题,使得训练非常深的网络成为可能 - -在我们的实现中,`SimpleResNet`使用了基本的残差块,每个残差块包含两个3×3的卷积层和一个跳跃连接。 - -1. 
在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'resnet'`,并将`use_data_augmentation`设置为`True`。 - -2. 观察网络的训练过程和性能,特别是深度对训练稳定性的影响。 - -##### 3.1.3 Bottleneck结构 - -在更深的ResNet变体中,常使用"瓶颈"(Bottleneck)结构来降低计算复杂度: - -- 使用1×1卷积降低通道数(降维) -- 使用3×3卷积进行特征提取 -- 再使用1×1卷积恢复通道数(升维) - -这种设计大幅减少参数量和计算量,同时保持或提高性能。 - -**思考问题3**: 分析Bottleneck结构的优势。为什么1×1卷积在深度CNN中如此重要?它如何帮助控制网络的参数量和计算复杂度? - - -**探索问题1**: 查看`models/cnn.py`中的`SimpleResNet`实现,分析残差连接是如何实现的。如果输入和输出通道数不匹配,代码是如何处理的? - - - -#### 3.2 模型复杂度分析 - -不同CNN架构在性能和效率之间存在权衡。现在我们将通过分析不同模型的参数量和推理时间来理解这种权衡。 - -1. 运行以下代码来分析各个模型的复杂度: - ```python - from models import SimpleMLP, DeepMLP, ResidualMLP, SimpleCNN, MediumCNN, VGGStyleNet, SimpleResNet - from utils import model_complexity - import torch - - device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') - - models = { - 'SimpleMLP': SimpleMLP(), - 'DeepMLP': DeepMLP(), - 'SimpleCNN': SimpleCNN(), - 'MediumCNN': MediumCNN(), - 'VGGStyleNet': VGGStyleNet(), - 'SimpleResNet': SimpleResNet() - } - - results = {} - for name, model in models.items(): - print(f"\n分析{name}复杂度:") - params, time = model_complexity(model, device=device) - results[name] = {'params': params, 'time': time} - ``` - -2. 记录并比较各个模型的参数量和推理时间。 - -**分析问题5**: VGG风格和ResNet风格网络的性能比较。残差连接带来了哪些优势? - -**分析问题6**: 参数量和推理时间如何影响模型的实用性?如何在性能和效率之间找到平衡? - - -#### 3.3 理解高级CNN设计理念 - -随着深度学习的发展,CNN架构设计也变得更加精细和高效。以下是一些重要的设计理念: - -1. **网络深度与宽度平衡**:更深的网络能学习更抽象的特征,但也更难训练;更宽的网络(更多通道)能捕获更多特征,但参数量增加 -2. **跳跃连接**:除了ResNet的残差连接,还有DenseNet的密集连接、U-Net的跨层连接等 -3. **特征增强**:注意力机制(如SENet的通道注意力)、特征融合等 -4. **高效卷积设计**:深度可分离卷积(MobileNet)、组卷积(ShuffleNet)等 - -**探索问题2**: 如果你要为移动设备设计一个CNN模型,应该考虑哪些因素来权衡性能和效率?请提出至少三条具体的设计原则。 - - -### 第四部分:模型比较与分析 - -运行 `compare.py` 来对比不同模型的性能: - -**综合分析**: 根据比较结果,分析不同类型模型(MLP和CNN)以及不同复杂度模型的性能差异。考虑以下几点: -1. 测试准确率 -2. 参数量 -3. 推理时间 -4. 训练收敛速度 -5. 过拟合/欠拟合情况 - - -## 创新探索任务(选做) - -选择下列一项或多项任务完成: - -1. **模型改进**:对任一模型进行修改和改进,提高其在CIFAR-10上的性能。 -2. **可视化分析**:设计更好的可视化方法来解释模型的决策过程。 -3. **迁移学习**:探索如何利用预训练模型提高CIFAR-10的分类性能。 -4. 
**对抗性样本**:生成对抗性样本,并研究不同模型对对抗性样本的鲁棒性。 -5. **自监督学习**:实现一个简单的自监督学习方法,并评估其效果。 - -## 实验报告要求 - -实验报告应包含以下内容: - -1. 实验目的和背景介绍 -2. 实验原理简述 -3. 实验过程描述 -4. 实现的代码(关键部分,包含详细注释) -5. 实验结果和分析(包括填写的所有分析问题和任务) -6. 创新探索任务的设计、实现和结果(如果选做) -7. 结论和思考 -8. 参考文献 - -## 评分标准 - -- 基础任务完成度:60% -- 分析问题深度和准确性:35% -- 创新探索任务:15% (bonus) -- 报告质量和表达清晰度:5% - -## 参考资料 - -1. LeCun, Y., Bengio, Y., & Hinton, G. (2015). Deep learning. Nature, 521(7553), 436-444. -2. He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. CVPR. -3. Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556. -4. PyTorch文档:https://pytorch.org/docs/stable/index.html -5. CS231n: Convolutional Neural Networks for Visual Recognition:https://cs231n.github.io/ \ No newline at end of file diff --git a/Jianhai/lab5/checkpoints/DeepMLP_best.pth b/Jianhai/lab5/checkpoints/DeepMLP_best.pth new file mode 100644 index 0000000..596bcc0 Binary files /dev/null and b/Jianhai/lab5/checkpoints/DeepMLP_best.pth differ diff --git a/Jianhai/lab5/checkpoints/SimpleCNN_best.pth b/Jianhai/lab5/checkpoints/SimpleCNN_best.pth new file mode 100644 index 0000000..cd22815 Binary files /dev/null and b/Jianhai/lab5/checkpoints/SimpleCNN_best.pth differ diff --git a/Jianhai/lab5/checkpoints/SimpleMLP_best.pth b/Jianhai/lab5/checkpoints/SimpleMLP_best.pth new file mode 100644 index 0000000..15381c6 Binary files /dev/null and b/Jianhai/lab5/checkpoints/SimpleMLP_best.pth differ diff --git a/Jianhai/lab5/ck/SimpleCNN_best.pth b/Jianhai/lab5/ck/SimpleCNN_best.pth new file mode 100644 index 0000000..4788bf1 Binary files /dev/null and b/Jianhai/lab5/ck/SimpleCNN_best.pth differ diff --git a/Jianhai/lab5/ck/SimpleMLP_best.pth b/Jianhai/lab5/ck/SimpleMLP_best.pth new file mode 100644 index 0000000..8a3eab6 Binary files /dev/null and b/Jianhai/lab5/ck/SimpleMLP_best.pth differ diff --git a/Jianhai/lab5/compare.py b/Jianhai/lab5/compare.py new file mode 
100644 index 0000000..45152d4 --- /dev/null +++ b/Jianhai/lab5/compare.py @@ -0,0 +1,321 @@ +import torch +import torch.nn as nn +import torch.optim as optim +import matplotlib.pyplot as plt +import numpy as np +import time +import os + +# 导入项目中的模块 +from models import SimpleMLP, DeepMLP, ResidualMLP, SimpleCNN, MediumCNN, VGGStyleNet, SimpleResNet +from utils import load_cifar10, set_seed + +def train_model(model, train_loader, valid_loader, criterion, optimizer, scheduler=None, + num_epochs=10, device=None, save_dir='./checkpoints'): + """训练模型并记录性能指标""" + if device is None: + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + + start_time = time.time() + model = model.to(device) + + history = { + 'train_loss': [], 'train_acc': [], + 'val_loss': [], 'val_acc': [], + 'epoch_times': [] + } + + best_val_acc = 0.0 + + # 确保保存目录存在 + os.makedirs(save_dir, exist_ok=True) + + for epoch in range(num_epochs): + epoch_start = time.time() + print(f"Epoch {epoch+1}/{num_epochs}") + + # 训练阶段 + model.train() + train_loss = 0.0 + train_correct = 0 + train_total = 0 + + for inputs, labels in train_loader: + inputs, labels = inputs.to(device), labels.to(device) + + # 梯度清零 + optimizer.zero_grad() + + # 前向传播 + outputs = model(inputs) + loss = criterion(outputs, labels) + + # 反向传播和优化 + loss.backward() + optimizer.step() + + # 统计 + train_loss += loss.item() * inputs.size(0) + _, predicted = torch.max(outputs, 1) + train_total += labels.size(0) + train_correct += (predicted == labels).sum().item() + + # 计算训练指标 + train_loss = train_loss / len(train_loader.sampler) + train_acc = train_correct / train_total + + # 验证阶段 + model.eval() + val_loss = 0.0 + val_correct = 0 + val_total = 0 + + with torch.no_grad(): + for inputs, labels in valid_loader: + inputs, labels = inputs.to(device), labels.to(device) + + # 前向传播 + outputs = model(inputs) + loss = criterion(outputs, labels) + + # 统计 + val_loss += loss.item() * inputs.size(0) + _, predicted = torch.max(outputs, 1) + 
val_total += labels.size(0) + val_correct += (predicted == labels).sum().item() + + # 计算验证指标 + val_loss = val_loss / len(valid_loader.sampler) + val_acc = val_correct / val_total + + # 更新学习率 + if scheduler: + scheduler.step() + + # 记录历史 + history['train_loss'].append(train_loss) + history['train_acc'].append(train_acc) + history['val_loss'].append(val_loss) + history['val_acc'].append(val_acc) + + # 记录每个epoch的时间 + epoch_end = time.time() + epoch_time = epoch_end - epoch_start + history['epoch_times'].append(epoch_time) + + # 保存最佳模型 + if val_acc > best_val_acc: + best_val_acc = val_acc + torch.save(model.state_dict(), f"{save_dir}/{model.__class__.__name__}_best.pth") + + print(f"训练损失: {train_loss:.4f}, 训练准确率: {train_acc:.4f}") + print(f"验证损失: {val_loss:.4f}, 验证准确率: {val_acc:.4f}") + print(f"本轮用时: {epoch_time:.2f}s") + print("-" * 50) + + # 计算总训练时间 + total_time = time.time() - start_time + print(f"总训练时间: {total_time:.2f}s") + + return model, history + +def evaluate_model(model, test_loader, criterion, device=None): + """评估模型在测试集上的性能""" + if device is None: + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + + model = model.to(device) + model.eval() + + test_loss = 0.0 + test_correct = 0 + test_total = 0 + + with torch.no_grad(): + for inputs, labels in test_loader: + inputs, labels = inputs.to(device), labels.to(device) + + # 前向传播 + outputs = model(inputs) + loss = criterion(outputs, labels) + + # 统计 + test_loss += loss.item() * inputs.size(0) + _, predicted = torch.max(outputs, 1) + test_total += labels.size(0) + test_correct += (predicted == labels).sum().item() + + # 计算测试指标 + test_loss = test_loss / len(test_loader.dataset) + test_acc = test_correct / test_total + + return test_loss, test_acc + +def model_complexity(model, input_size=(3, 32, 32), batch_size=128, device=None): + """计算模型参数量和推理时间""" + if device is None: + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + + model = model.to(device) + model.eval() + + # 
计算参数量 + num_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + + # 创建随机输入 + dummy_input = torch.randn(batch_size, *input_size).to(device) + + # 预热 + with torch.no_grad(): + for _ in range(10): + _ = model(dummy_input) + + # 计时 + start_time = time.time() + with torch.no_grad(): + for _ in range(100): + _ = model(dummy_input) + end_time = time.time() + + inference_time = (end_time - start_time) / 100 + + return num_params, inference_time + +def compare_models(): + """比较不同模型的性能""" + # 设置随机种子 + set_seed() + + # 检查是否有可用的GPU + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + print(f"使用设备: {device}") + + # 加载数据 + train_loader, valid_loader, test_loader, classes = load_cifar10( + use_augmentation=True, + batch_size=128 + ) + + # 定义要比较的模型 + models = { + 'SimpleMLP': SimpleMLP(), + 'DeepMLP': DeepMLP(dropout_rate=0.5, use_bn=True, use_dropout=True), + 'ResidualMLP': ResidualMLP(activation='relu'), + 'SimpleCNN': SimpleCNN(), + 'MediumCNN': MediumCNN(use_bn=True), + 'VGGStyleNet': VGGStyleNet(), + 'SimpleResNet': SimpleResNet(num_blocks=[2, 2, 2]) + } + + # 存储结果 + results = {} + + # 训练和评估每个模型 + for model_name, model in models.items(): + print(f"\n开始训练 {model_name}...") + + # 定义损失函数和优化器 + criterion = nn.CrossEntropyLoss() + optimizer = optim.Adam(model.parameters(), lr=0.001) + scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=15) + + # 计算模型复杂度 + print(f"\n分析 {model_name} 复杂度...") + num_params, inference_time = model_complexity(model, device=device) + + # 训练模型 + _, history = train_model( + model, train_loader, valid_loader, criterion, optimizer, scheduler, + num_epochs=15, device=device, save_dir='./checkpoints' + ) + + # 在测试集上评估模型 + test_loss, test_acc = evaluate_model(model, test_loader, criterion, device) + + print(f"{model_name} 测试准确率: {test_acc:.4f}") + + # 存储结果 + results[model_name] = { + 'history': history, + 'test_acc': test_acc, + 'params': num_params, + 'inf_time': inference_time + } + + # 比较模型性能 + 
model_names = list(results.keys()) + test_accs = [results[name]['test_acc'] for name in model_names] + params = [results[name]['params'] / 1e6 for name in model_names] # 转换为百万 + inf_times = [results[name]['inf_time'] * 1000 for name in model_names] # 转换为毫秒 + + # 创建比较图表 + fig, axes = plt.subplots(3, 1, figsize=(15, 15)) + + # 测试准确率比较 + ax = axes[0] + bars = ax.bar(model_names, test_accs, color='skyblue') + ax.set_title('Model Test Accuracy Comparison') # 英文标题 + ax.set_ylabel('Accuracy') # 英文标签 + ax.set_ylim(0, 1) + + # 添加数值标签 + for bar, acc in zip(bars, test_accs): + ax.text(bar.get_x() + bar.get_width()/2., bar.get_height() + 0.01, + f'{acc:.4f}', ha='center', va='bottom') + + # 参数量比较 + ax = axes[1] + bars = ax.bar(model_names, params, color='lightgreen') + ax.set_title('Model Parameter Count Comparison (millions)') # 英文标题 + ax.set_ylabel('Parameters (M)') # 英文标签 + + # 添加数值标签 + for bar, param in zip(bars, params): + ax.text(bar.get_x() + bar.get_width()/2., bar.get_height() + 0.1, + f'{param:.2f}M', ha='center', va='bottom') + + # 推理时间比较 + ax = axes[2] + bars = ax.bar(model_names, inf_times, color='salmon') + ax.set_title('Model Inference Time Comparison (ms/batch)') # 英文标题 + ax.set_ylabel('Inference time (ms)') # 英文标签 + + # 添加数值标签 + for bar, time in zip(bars, inf_times): + ax.text(bar.get_x() + bar.get_width()/2., bar.get_height() + 0.1, + f'{time:.2f}ms', ha='center', va='bottom') + + plt.tight_layout() + plt.savefig('model_comparison.png') + plt.show() + + # 绘制训练曲线比较 + fig, axes = plt.subplots(2, 1, figsize=(15, 10)) + + # 训练损失比较 + ax = axes[0] + for name in model_names: + ax.plot(results[name]['history']['train_loss'], label=f'{name} Training') + ax.plot(results[name]['history']['val_loss'], '--', label=f'{name} Validation') + ax.set_title('Training Loss Comparison') # 英文标题 + ax.set_xlabel('Epoch') # 英文标签 + ax.set_ylabel('Loss') # 英文标签 + ax.legend() + + # 验证准确率比较 + ax = axes[1] + for name in model_names: + ax.plot(results[name]['history']['val_acc'], 
label=name) + ax.set_title('Validation Accuracy Comparison') # 英文标题 + ax.set_xlabel('Epoch') # 英文标签 + ax.set_ylabel('Accuracy') # 英文标签 + ax.legend() + + plt.tight_layout() + plt.savefig('training_curves_comparison.png') + plt.show() + + return results + +if __name__ == "__main__": + results = compare_models() \ No newline at end of file diff --git a/Jianhai/lab5/data/cifar-10-batches-py/batches.meta b/Jianhai/lab5/data/cifar-10-batches-py/batches.meta new file mode 100644 index 0000000..4467a6e Binary files /dev/null and b/Jianhai/lab5/data/cifar-10-batches-py/batches.meta differ diff --git a/Jianhai/lab5/data/cifar-10-batches-py/data_batch_1 b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_1 new file mode 100644 index 0000000..ab404a5 Binary files /dev/null and b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_1 differ diff --git a/Jianhai/lab5/data/cifar-10-batches-py/data_batch_2 b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_2 new file mode 100644 index 0000000..6bf1369 Binary files /dev/null and b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_2 differ diff --git a/Jianhai/lab5/data/cifar-10-batches-py/data_batch_3 b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_3 new file mode 100644 index 0000000..66a0d63 Binary files /dev/null and b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_3 differ diff --git a/Jianhai/lab5/data/cifar-10-batches-py/data_batch_4 b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_4 new file mode 100644 index 0000000..cf8d03d Binary files /dev/null and b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_4 differ diff --git a/Jianhai/lab5/data/cifar-10-batches-py/data_batch_5 b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_5 new file mode 100644 index 0000000..468b2aa Binary files /dev/null and b/Jianhai/lab5/data/cifar-10-batches-py/data_batch_5 differ diff --git a/Jianhai/lab5/data/cifar-10-batches-py/readme.html b/Jianhai/lab5/data/cifar-10-batches-py/readme.html new file mode 100644 index 0000000..e377ade --- /dev/null 
class SimpleCNN(nn.Module):
    """Simple CNN: two conv/pool stages followed by two fully-connected layers.

    Expects 3x32x32 inputs (CIFAR-10) and returns 10 class logits.
    """

    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(32 * 8 * 8, 128)
        self.fc2 = nn.Linear(128, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.pool(self.relu(self.conv1(x)))  # -> 16 x 16 x 16
        x = self.pool(self.relu(self.conv2(x)))  # -> 32 x 8 x 8
        # Flatten everything except the batch dimension. Using torch.flatten
        # instead of x.view(-1, 32 * 8 * 8) keeps the batch size explicit: a
        # mis-sized input now raises in fc1 instead of silently re-batching.
        x = torch.flatten(x, 1)
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x


class MediumCNN(nn.Module):
    """Medium-complexity CNN with optional batch normalization and dropout.

    Args:
        use_bn: when False, every BatchNorm layer is replaced by nn.Identity.
    """

    def __init__(self, use_bn=True):
        super(MediumCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(32) if use_bn else nn.Identity()
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(32) if use_bn else nn.Identity()
        self.pool1 = nn.MaxPool2d(2, 2)
        self.dropout1 = nn.Dropout(0.25)

        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm2d(64) if use_bn else nn.Identity()
        self.conv4 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.bn4 = nn.BatchNorm2d(64) if use_bn else nn.Identity()
        self.pool2 = nn.MaxPool2d(2, 2)
        self.dropout2 = nn.Dropout(0.25)

        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(64 * 8 * 8, 512)
        self.bn5 = nn.BatchNorm1d(512) if use_bn else nn.Identity()
        self.dropout3 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(512, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Block 1: two 3x3 convs at 32 channels, then downsample to 16x16.
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.pool1(x)
        x = self.dropout1(x)

        # Block 2: two 3x3 convs at 64 channels, then downsample to 8x8.
        x = self.relu(self.bn3(self.conv3(x)))
        x = self.relu(self.bn4(self.conv4(x)))
        x = self.pool2(x)
        x = self.dropout2(x)

        # Classifier head.
        x = self.flatten(x)
        x = self.relu(self.bn5(self.fc1(x)))
        x = self.dropout3(x)
        x = self.fc2(x)
        return x
class VGGStyleNet(nn.Module):
    """VGG-flavoured CNN: three conv blocks of 3x3 convolutions plus an FC head."""

    @staticmethod
    def _conv_block(in_ch, out_ch):
        """One VGG block: two conv/BN/ReLU triples, a 2x2 max-pool, and dropout."""
        return [
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout(0.25),
        ]

    def __init__(self):
        super(VGGStyleNet, self).__init__()

        # Feature extractor: 64 -> 128 -> 256 channels; each block halves the
        # spatial size, so 32x32 inputs come out as 256 x 4 x 4.
        self.features = nn.Sequential(
            *self._conv_block(3, 64),
            *self._conv_block(64, 128),
            *self._conv_block(128, 256),
        )

        # Classifier head.
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(256 * 4 * 4, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, 10),
        )

        self._initialize_weights()

    def forward(self, x):
        return self.classifier(self.features(x))

    def _initialize_weights(self):
        """Kaiming-init convs and linears; constant-init batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.constant_(m.bias, 0)
class ResidualBlock(nn.Module):
    """Basic two-conv residual block with an optional projection shortcut."""

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

        # Use a 1x1 projection when the spatial size or channel count changes;
        # otherwise the skip path is the identity.
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = self.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        branch = branch + self.shortcut(x)
        return self.relu(branch)


class SimpleResNet(nn.Module):
    """Small CIFAR-style ResNet: 16/32/64-channel stages + global average pool."""

    def __init__(self, num_blocks=[2, 2, 2], num_classes=10):
        super(SimpleResNet, self).__init__()
        self.in_channels = 16

        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)

        self.layer1 = self._make_layer(16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(64, num_blocks[2], stride=2)

        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(64, num_classes)

    def _make_layer(self, out_channels, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest keep stride 1.
        blocks = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            blocks.append(ResidualBlock(self.in_channels, out_channels, block_stride))
            self.in_channels = out_channels
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        return self.fc(out)
class SimpleMLP(nn.Module):
    """One-hidden-layer MLP over flattened images."""

    def __init__(self, input_dim=3*32*32, hidden_dim=512, output_dim=10):
        super(SimpleMLP, self).__init__()
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        hidden = self.relu(self.fc1(self.flatten(x)))
        return self.fc2(hidden)


class DeepMLP(nn.Module):
    """Deep MLP (1024-512-256 hidden widths) with optional BN and dropout.

    Args:
        input_dim: flattened input size.
        dropout_rate: dropout probability when use_dropout is True.
        use_bn: replace BatchNorm layers with nn.Identity when False.
        use_dropout: replace Dropout with nn.Identity when False.
    """

    def __init__(self, input_dim=3*32*32, dropout_rate=0.5, use_bn=True, use_dropout=True):
        super(DeepMLP, self).__init__()
        self.flatten = nn.Flatten()
        self.use_bn = use_bn
        self.use_dropout = use_dropout

        self.fc1 = nn.Linear(input_dim, 1024)
        self.bn1 = nn.BatchNorm1d(1024) if use_bn else nn.Identity()
        self.fc2 = nn.Linear(1024, 512)
        self.bn2 = nn.BatchNorm1d(512) if use_bn else nn.Identity()
        self.fc3 = nn.Linear(512, 256)
        self.bn3 = nn.BatchNorm1d(256) if use_bn else nn.Identity()
        self.fc4 = nn.Linear(256, 10)

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout_rate) if use_dropout else nn.Identity()

    def forward(self, x):
        out = self.flatten(x)
        # Three identical hidden stages: linear -> (bn) -> relu -> (dropout).
        stages = ((self.fc1, self.bn1), (self.fc2, self.bn2), (self.fc3, self.bn3))
        for linear, norm in stages:
            out = self.dropout(self.relu(norm(linear(out))))
        return self.fc4(out)
class ResidualBlock(nn.Module):
    """Residual block for MLPs: two linear/BN stages plus a skip connection.

    A Linear+BatchNorm projection is used on the skip path when the input and
    output widths differ; otherwise the input passes through unchanged.
    """

    def __init__(self, input_dim, output_dim, activation, dropout_rate=0.5):
        super(ResidualBlock, self).__init__()

        self.linear1 = nn.Linear(input_dim, output_dim)
        self.bn1 = nn.BatchNorm1d(output_dim)
        self.linear2 = nn.Linear(output_dim, output_dim)
        self.bn2 = nn.BatchNorm1d(output_dim)

        self.activation = activation
        self.dropout = nn.Dropout(dropout_rate)

        # Project the skip path only when the widths differ.
        self.shortcut = nn.Identity()
        if input_dim != output_dim:
            self.shortcut = nn.Sequential(
                nn.Linear(input_dim, output_dim),
                nn.BatchNorm1d(output_dim)
            )

    def forward(self, x):
        residual = x

        out = self.linear1(x)
        out = self.bn1(out)
        out = self.activation(out)
        out = self.dropout(out)

        out = self.linear2(out)
        out = self.bn2(out)

        out += self.shortcut(residual)
        out = self.activation(out)

        return out


class ResidualMLP(nn.Module):
    """MLP with residual connections between hidden stages.

    Args:
        input_dim: flattened input size.
        hidden_dims: widths of the hidden stages.
        output_dim: number of classes.
        dropout_rate: dropout probability used throughout.
        activation: one of 'relu', 'leaky_relu', 'gelu', 'swish'.

    Raises:
        ValueError: for an unknown activation name.
    """

    def __init__(self, input_dim=3*32*32, hidden_dims=[1024, 1024, 1024, 512, 512, 512], output_dim=10,
                 dropout_rate=0.5, activation='relu'):
        super(ResidualMLP, self).__init__()
        self.flatten = nn.Flatten()

        # Select the activation. Every option must be an nn.Module: the
        # previous lambda-based swish could not be registered in
        # nn.Sequential and made activation='swish' raise a TypeError.
        # nn.SiLU computes the same x * sigmoid(x).
        if activation == 'relu':
            self.activation = nn.ReLU()
        elif activation == 'leaky_relu':
            self.activation = nn.LeakyReLU(0.1)
        elif activation == 'gelu':
            self.activation = nn.GELU()
        elif activation == 'swish':
            self.activation = nn.SiLU()
        else:
            raise ValueError(f"不支持的激活函数: {activation}")

        # Input stage.
        layers = []
        layers.append(nn.Linear(input_dim, hidden_dims[0]))
        layers.append(nn.BatchNorm1d(hidden_dims[0]))
        layers.append(self.activation)
        layers.append(nn.Dropout(dropout_rate))

        # Hidden stages with residual connections.
        for i in range(1, len(hidden_dims)):
            layers.append(ResidualBlock(hidden_dims[i-1], hidden_dims[i], self.activation, dropout_rate))

        # Output stage.
        layers.append(nn.Linear(hidden_dims[-1], output_dim))

        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        x = self.flatten(x)
        x = self.layers(x)
        return x
visualize_model_predictions,\n", + " visualize_conv_filters,\n", + " model_complexity\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "9c8a2cb3", + "metadata": { + "inputHidden": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "使用设备: cpu\n", + "Files already downloaded and verified\n", + "Files already downloaded and verified\n", + "训练集大小: 45000\n", + "验证集大小: 5000\n", + "测试集大小: 10000\n", + "使用模型: SimpleMLP\n" + ] + } + ], + "source": [ + "# 设置参数\n", + "model_type = 'simple_mlp' # 可选: 'simple_mlp', 'deep_mlp', 'residual_mlp', 'simple_cnn', 'medium_cnn', 'vgg_style', 'resnet'\n", + "epochs = 20\n", + "learning_rate = 0.001\n", + "batch_size = 128\n", + "use_data_augmentation = True # CNN通常受益于数据增强\n", + "save_directory = './ck'\n", + "visualize_filters = True # 是否可视化卷积核(仅对CNN有效)\n", + "visualize_predictions = True # 是否可视化预测结果\n", + "\n", + "# 设置随机种子\n", + "set_seed()\n", + "\n", + "#因为mo平台的提交任务机制,需要手动切换到该文件夹下。\n", + "os.chdir(os.path.expanduser(\"~/work/Jianhai/lab5\"))\n", + "\n", + "# 检查是否有可用的GPU\n", + "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n", + "print(f\"使用设备: {device}\")\n", + "\n", + "# 加载数据\n", + "train_loader, valid_loader, test_loader, classes = load_cifar10(\n", + " use_augmentation=use_data_augmentation, \n", + " batch_size=batch_size\n", + ")\n", + "\n", + "# 初始化选择的模型\n", + "if model_type == 'simple_mlp':\n", + " model = SimpleMLP()\n", + " model_name = \"SimpleMLP\"\n", + "elif model_type == 'deep_mlp':\n", + " model = DeepMLP(dropout_rate=0.5, use_bn=True, use_dropout=True)\n", + " model_name = \"DeepMLP\"\n", + "elif model_type == 'residual_mlp':\n", + " model = ResidualMLP(activation='relu')\n", + " model_name = \"ResidualMLP\"\n", + "elif model_type == 'simple_cnn':\n", + " model = SimpleCNN()\n", + " model_name = \"SimpleCNN\"\n", + "elif model_type == 'medium_cnn':\n", + " model = MediumCNN(use_bn=True)\n", + " model_name = \"MediumCNN\"\n", + 
"elif model_type == 'vgg_style':\n", + " model = VGGStyleNet()\n", + " model_name = \"VGGStyleNet\"\n", + "else: # resnet\n", + " model = SimpleResNet(num_blocks=[2, 2, 2])\n", + " model_name = \"SimpleResNet\"\n", + "\n", + "print(f\"使用模型: {model_name}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "51f4362c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "分析模型复杂度:\n", + "参数量: 1,578,506\n", + "每批次(128个样本)推理时间: 8.96ms\n", + "Epoch 1/20\n" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipykernel_246/3850660409.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 16\u001b[0m trained_model, history = train_model(\n\u001b[1;32m 17\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtrain_loader\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalid_loader\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mscheduler\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 18\u001b[0;31m \u001b[0mnum_epochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msave_dir\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msave_directory\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 19\u001b[0m )\n\u001b[1;32m 20\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/work/Jianhai/lab5/utils/train_utils.py\u001b[0m in \u001b[0;36mtrain_model\u001b[0;34m(model, train_loader, valid_loader, criterion, optimizer, scheduler, num_epochs, device, 
save_dir)\u001b[0m\n\u001b[1;32m 63\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0;31m# 反向传播和优化\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 65\u001b[0;31m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 66\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 67\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.virtualenvs/basenv/lib/python3.7/site-packages/torch/tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 244\u001b[0m inputs=inputs)\n\u001b[0;32m--> 245\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 246\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 247\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + 
"\u001b[0;32m~/.virtualenvs/basenv/lib/python3.7/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m 145\u001b[0m Variable._execution_engine.run_backward(\n\u001b[1;32m 146\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgrad_tensors_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 147\u001b[0;31m allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag\n\u001b[0m\u001b[1;32m 148\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "# 计算模型复杂度\n", + "print(\"\\n分析模型复杂度:\")\n", + "model_complexity(model, device=device)\n", + "\n", + "# 定义损失函数和优化器\n", + "criterion = nn.CrossEntropyLoss()\n", + "optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n", + "\n", + "# 可以添加学习率调度器\n", + "scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)\n", + "\n", + "# 确保checkpoints目录存在\n", + "os.makedirs(save_directory, exist_ok=True)\n", + "\n", + "# 训练模型\n", + "trained_model, history = train_model(\n", + " model, train_loader, valid_loader, criterion, optimizer, scheduler,\n", + " num_epochs=epochs, device=device, save_dir=save_directory\n", + ")\n", + "\n", + "# 绘制训练历史\n", + "plot_training_history(history, title=f\"{model_name} Training History\")\n", + "\n", + "# 在测试集上评估模型\n", + "print(\"\\n在测试集上评估模型:\")\n", + "test_loss, test_acc = evaluate_model(trained_model, test_loader, criterion, device, classes)\n", + "\n", + "print(f\"{model_name} 最终测试准确率: {test_acc:.4f}\")\n", + "\n", + "# 如果是CNN模型并且需要可视化卷积核\n", + "if visualize_filters and model_type in ['simple_cnn', 
'medium_cnn', 'vgg_style', 'resnet']:\n", + " print(\"\\n可视化卷积核:\")\n", + " if model_type == 'simple_cnn':\n", + " visualize_conv_filters(trained_model, 'conv1')\n", + " elif model_type == 'medium_cnn':\n", + " visualize_conv_filters(trained_model, 'conv1')\n", + " elif model_type == 'vgg_style':\n", + " visualize_conv_filters(trained_model, 'features.0')\n", + " else: # resnet\n", + " visualize_conv_filters(trained_model, 'conv1')\n", + "\n", + "# 如果需要可视化模型预测\n", + "if visualize_predictions:\n", + " print(\"\\n可视化模型预测:\")\n", + " visualize_model_predictions(trained_model, test_loader, classes, device)\n", + "\n", + "print(f\"\\n{model_name}的训练和评估已完成!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d9379a62", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "554f08d9", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/Jianhai/lab5/utils/.ipynb_checkpoints/__init__-checkpoint.py b/Jianhai/lab5/utils/.ipynb_checkpoints/__init__-checkpoint.py new file mode 100644 index 0000000..cc95838 --- /dev/null +++ b/Jianhai/lab5/utils/.ipynb_checkpoints/__init__-checkpoint.py @@ -0,0 +1,9 @@ +from .data_loader import load_cifar10, visualize_samples, set_seed +from .train_utils import ( + train_model, + evaluate_model, + plot_training_history, + visualize_model_predictions, + visualize_conv_filters, + model_complexity +) \ No newline at end of file diff --git a/Jianhai/lab5/utils/.ipynb_checkpoints/data_loader-checkpoint.py 
def train_model(model, train_loader, valid_loader, criterion, optimizer, scheduler=None,
                num_epochs=10, device=None, save_dir='./checkpoints'):
    """Train a model, track per-epoch metrics, and checkpoint the best weights.

    Args:
        model: network to train (moved onto `device`).
        train_loader, valid_loader: training / validation data loaders.
        criterion: loss function.
        optimizer: optimizer stepping `model`'s parameters.
        scheduler: optional LR scheduler, stepped once per epoch.
        num_epochs: number of training epochs.
        device: torch device; auto-detected when None.
        save_dir: directory for the best-validation-accuracy checkpoint.

    Returns:
        (model, history) where history maps 'train_loss', 'train_acc',
        'val_loss', 'val_acc', 'epoch_times' to per-epoch lists.
    """
    if device is None:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    t0 = time.time()
    model = model.to(device)

    history = {
        'train_loss': [], 'train_acc': [],
        'val_loss': [], 'val_acc': [],
        'epoch_times': []
    }
    best_val_acc = 0.0

    os.makedirs(save_dir, exist_ok=True)

    for epoch in range(num_epochs):
        epoch_t0 = time.time()
        print(f"Epoch {epoch+1}/{num_epochs}")

        # ---- training pass ----
        model.train()
        running_loss, n_correct, n_seen = 0.0, 0, 0
        for batch_x, batch_y in train_loader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)

            optimizer.zero_grad()
            logits = model(batch_x)
            loss = criterion(logits, batch_y)
            loss.backward()
            optimizer.step()

            # Accumulate sample-weighted loss and top-1 hits.
            running_loss += loss.item() * batch_x.size(0)
            n_seen += batch_y.size(0)
            n_correct += (logits.argmax(dim=1) == batch_y).sum().item()

        train_loss = running_loss / len(train_loader.sampler)
        train_acc = n_correct / n_seen

        # ---- validation pass ----
        model.eval()
        running_loss, n_correct, n_seen = 0.0, 0, 0
        with torch.no_grad():
            for batch_x, batch_y in valid_loader:
                batch_x, batch_y = batch_x.to(device), batch_y.to(device)
                logits = model(batch_x)
                running_loss += criterion(logits, batch_y).item() * batch_x.size(0)
                n_seen += batch_y.size(0)
                n_correct += (logits.argmax(dim=1) == batch_y).sum().item()

        val_loss = running_loss / len(valid_loader.sampler)
        val_acc = n_correct / n_seen

        # Step the LR schedule once per epoch, after validation.
        if scheduler:
            scheduler.step()

        history['train_loss'].append(train_loss)
        history['train_acc'].append(train_acc)
        history['val_loss'].append(val_loss)
        history['val_acc'].append(val_acc)

        epoch_time = time.time() - epoch_t0
        history['epoch_times'].append(epoch_time)

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), f"{save_dir}/{model.__class__.__name__}_best.pth")
            print(f"Model saved to {save_dir}/{model.__class__.__name__}_best.pth")

        print(f"Train Loss: {train_loss:.4f}, Train Accuracy: {train_acc:.4f}")
        print(f"Val Loss: {val_loss:.4f}, Val Accuracy: {val_acc:.4f}")
        print(f"Epoch Time: {epoch_time:.2f}s")
        print("-" * 50)

    total_time = time.time() - t0
    print(f"Total Training Time: {total_time:.2f}s")

    return model, history
def evaluate_model(model, test_loader, criterion, device=None, classes=None):
    """Evaluate a model on a held-out loader; optionally plot a confusion matrix.

    Args:
        model: trained network.
        test_loader: evaluation data loader.
        criterion: loss function.
        device: torch device; auto-detected when None.
        classes: class names; when given, a confusion matrix heatmap and a
            classification report are produced (requires sklearn + seaborn).

    Returns:
        (test_loss, test_acc)
    """
    if device is None:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    model = model.to(device)
    model.eval()

    loss_sum = 0.0
    n_correct = 0
    n_seen = 0
    y_true, y_pred = [], []

    with torch.no_grad():
        for batch_x, batch_y in test_loader:
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)

            logits = model(batch_x)
            loss_sum += criterion(logits, batch_y).item() * batch_x.size(0)

            predicted = logits.argmax(dim=1)
            n_seen += batch_y.size(0)
            n_correct += (predicted == batch_y).sum().item()

            # Keep labels around for the optional confusion matrix below.
            y_true.extend(batch_y.cpu().numpy())
            y_pred.extend(predicted.cpu().numpy())

    test_loss = loss_sum / len(test_loader.dataset)
    test_acc = n_correct / n_seen

    print(f"Test Loss: {test_loss:.4f}, Test Accuracy: {test_acc:.4f}")

    if classes:
        try:
            from sklearn.metrics import confusion_matrix, classification_report
            import seaborn as sns

            cm = confusion_matrix(y_true, y_pred)
            plt.figure(figsize=(10, 8))
            sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=classes, yticklabels=classes)
            plt.xlabel('Predicted')
            plt.ylabel('True')
            plt.title('Confusion Matrix')
            plt.show()

            print("Classification Report:")
            print(classification_report(y_true, y_pred, target_names=classes))
        except ImportError:
            print("Warning: sklearn or seaborn not installed, cannot generate confusion matrix and classification report")

    return test_loss, test_acc

def plot_training_history(history, title="Training History"):
    """Plot loss and accuracy curves side by side and save the figure as a PNG.

    Args:
        history: dict with 'train_loss', 'val_loss', 'train_acc', 'val_acc'.
        title: figure title; also used (with spaces replaced) as the filename.
    """
    plt.figure(figsize=(12, 5))

    # (subplot index, y-label, subplot title, [(history key, legend label)]).
    panels = [
        (1, 'Loss', 'Loss Curves',
         [('train_loss', 'Training Loss'), ('val_loss', 'Validation Loss')]),
        (2, 'Accuracy', 'Accuracy Curves',
         [('train_acc', 'Training Accuracy'), ('val_acc', 'Validation Accuracy')]),
    ]
    for idx, ylabel, subtitle, series in panels:
        plt.subplot(1, 2, idx)
        for key, label in series:
            plt.plot(history[key], label=label)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.title(subtitle)
        plt.legend()

    plt.suptitle(title)
    plt.tight_layout()
    plt.savefig(f"{title.replace(' ', '_')}.png")
    plt.show()
def visualize_model_predictions(model, test_loader, classes, device=None, num_images=25):
    """Show a grid of test images with predicted vs. true labels.

    Correctly classified images are titled in green, misclassified in red.

    Args:
        model: trained network.
        test_loader: data loader to draw one batch from.
        classes: class names indexed by label.
        device: torch device; auto-detected when None.
        num_images: number of images in the grid.
    """
    if device is None:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    model = model.to(device)
    model.eval()

    # One batch is enough for the grid.
    images, labels = next(iter(test_loader))
    with torch.no_grad():
        _, preds = torch.max(model(images.to(device)), 1)

    preds = preds.cpu().numpy()
    labels = labels.numpy()

    grid_size = int(np.ceil(np.sqrt(num_images)))
    fig, axes = plt.subplots(grid_size, grid_size, figsize=(15, 15))

    shown = min(num_images, len(preds))
    # CIFAR-10 normalization constants, undone before display.
    mean = np.array([0.4914, 0.4822, 0.4465])
    std = np.array([0.2023, 0.1994, 0.2010])

    for i, ax in enumerate(axes.flat):
        if i < shown:
            img = images[i].numpy().transpose((1, 2, 0))
            img = np.clip(std * img + mean, 0, 1)

            ax.imshow(img)
            color = "green" if preds[i] == labels[i] else "red"
            ax.set_title(f"Predicted: {classes[preds[i]]}\nTrue: {classes[labels[i]]}", color=color)
            ax.axis('off')

    plt.tight_layout()
    plt.show()

def visualize_conv_filters(model, layer_name='conv1'):
    """Plot up to 16 kernels from the named Conv2d layer.

    3-channel kernels are shown in RGB; others show only channel 0.

    Args:
        model: network containing the layer.
        layer_name: dotted module name of the Conv2d layer (e.g. 'features.0').
    """
    model.eval()

    kernels = None
    for name, module in model.named_modules():
        if name == layer_name and isinstance(module, nn.Conv2d):
            kernels = module.weight.data.clone().cpu()
            break
    if kernels is None:
        print(f"Conv layer '{layer_name}' not found")
        return

    # Min-max normalise to [0, 1] for display.
    kernels = kernels - kernels.min()
    kernels = kernels / kernels.max()

    num_filters = min(16, kernels.size(0))
    fig, axes = plt.subplots(4, 4, figsize=(10, 10))
    fig.suptitle(f'Filters from {layer_name} layer')

    for i, ax in enumerate(axes.flat):
        if i < num_filters:
            if kernels.size(1) == 3:
                ax.imshow(kernels[i].permute(1, 2, 0))
            else:
                ax.imshow(kernels[i, 0], cmap='viridis')
        ax.axis('off')

    plt.tight_layout()
    plt.show()

def model_complexity(model, input_size=(3, 32, 32), batch_size=128, device=None):
    """Report trainable-parameter count and average per-batch inference time.

    Timing averages 100 forward passes after 10 warm-up passes.

    Args:
        model: network to measure.
        input_size: per-sample input shape.
        batch_size: batch size of the dummy input.
        device: torch device; auto-detected when None.

    Returns:
        (num_params, inference_time) with inference_time in seconds per batch.
    """
    if device is None:
        device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    model = model.to(device)
    model.eval()

    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

    dummy_input = torch.randn(batch_size, *input_size).to(device)

    with torch.no_grad():
        # Warm-up passes (JIT caches, allocator, etc.) are excluded from timing.
        for _ in range(10):
            model(dummy_input)
        start_time = time.time()
        for _ in range(100):
            model(dummy_input)
        inference_time = (time.time() - start_time) / 100

    print(f"Parameters: {num_params:,}")
    print(f"Inference time per batch ({batch_size} samples): {inference_time*1000:.2f}ms")

    return num_params, inference_time
""" + 设置随机种子,确保实验可重复性 + + 参数: + seed: 随机种子 + """ + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + +# 基本数据变换 - 只进行标准化 +basic_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) +]) + +# 使用数据增强的变换 +augmented_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) +]) + +def load_cifar10(use_augmentation=False, valid_size=0.1, batch_size=128, num_workers=2): + """ + 加载CIFAR-10数据集,并分割出验证集 + + 参数: + use_augmentation: 是否对训练集使用数据增强 + valid_size: 验证集比例 + batch_size: 批次大小 + num_workers: 数据加载器使用的工作进程数 + + 返回: + train_loader, valid_loader, test_loader: 数据加载器 + classes: 类别名称 + """ + transform = augmented_transform if use_augmentation else basic_transform + + # 加载训练数据 + train_dataset = datasets.CIFAR10( + root='./data', + train=True, + download=True, + transform=transform + ) + + # 加载测试数据 + test_dataset = datasets.CIFAR10( + root='./data', + train=False, + download=True, + transform=basic_transform + ) + + # 计算验证集大小 + num_train = len(train_dataset) + indices = list(range(num_train)) + np.random.shuffle(indices) + split = int(valid_size * num_train) + train_idx, valid_idx = indices[split:], indices[:split] + + # 创建数据采样器 + train_sampler = SubsetRandomSampler(train_idx) + valid_sampler = SubsetRandomSampler(valid_idx) + + # 创建数据加载器 + train_loader = DataLoader( + train_dataset, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers + ) + valid_loader = DataLoader( + train_dataset, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers + ) + test_loader = DataLoader( + test_dataset, batch_size=batch_size, shuffle=False, 
num_workers=num_workers + ) + + print(f"训练集大小: {len(train_idx)}") + print(f"验证集大小: {len(valid_idx)}") + print(f"测试集大小: {len(test_dataset)}") + + # 获取类别名称 + classes = ('plane', 'car', 'bird', 'cat', 'deer', + 'dog', 'frog', 'horse', 'ship', 'truck') + + return train_loader, valid_loader, test_loader, classes + +def visualize_samples(dataloader, classes, num_samples=5): + """ + 可视化数据样本 + + 参数: + dataloader: 数据加载器 + classes: 类别名称 + num_samples: 每个类别要显示的样本数 + """ + # 获取batch数据 + images, labels = next(iter(dataloader)) + + # 创建样本计数器 + class_counts = {i: 0 for i in range(len(classes))} + indices = [] + + for i, label in enumerate(labels): + label = label.item() + if class_counts[label] < num_samples: + indices.append(i) + class_counts[label] += 1 + + # 如果所有类别都有足够的样本,则停止 + if all(count >= num_samples for count in class_counts.values()): + break + + # 获取选定的图像和标签 + selected_images = images[indices] + selected_labels = labels[indices] + + # 创建图像网格 + fig, axes = plt.subplots(10, num_samples, figsize=(15, 20)) + fig.subplots_adjust(hspace=0.5) + + # 对于每个类别 + for class_idx in range(len(classes)): + # 找到该类别的所有样本 + class_indices = [i for i, label in enumerate(selected_labels) if label == class_idx] + + for i in range(min(num_samples, len(class_indices))): + img_idx = class_indices[i] + img = selected_images[img_idx].numpy().transpose((1, 2, 0)) + # 反标准化 + mean = np.array([0.4914, 0.4822, 0.4465]) + std = np.array([0.2023, 0.1994, 0.2010]) + img = std * img + mean + img = np.clip(img, 0, 1) + + ax = axes[class_idx, i] + ax.imshow(img) + ax.set_title(classes[class_idx]) + ax.axis('off') + + plt.tight_layout() + plt.show() + +if __name__ == "__main__": + # 设置随机种子 + set_seed() + + # 检查是否有可用的GPU + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + print(f"使用设备: {device}") + + # 加载数据 + train_loader, valid_loader, test_loader, classes = load_cifar10(use_augmentation=False) + + # 可视化一些样本 + visualize_samples(train_loader, classes, num_samples=5) \ No newline at end 
of file diff --git a/Jianhai/lab5/utils/train_utils.py b/Jianhai/lab5/utils/train_utils.py new file mode 100644 index 0000000..80f7e59 --- /dev/null +++ b/Jianhai/lab5/utils/train_utils.py @@ -0,0 +1,374 @@ +import torch +import torch.nn as nn +import torch.optim as optim +import matplotlib.pyplot as plt +import numpy as np +import time +import os + +def train_model(model, train_loader, valid_loader, criterion, optimizer, scheduler=None, + num_epochs=10, device=None, save_dir='./checkpoints'): + """ + 训练模型并记录性能指标 + + 参数: + model: 要训练的模型 + train_loader, valid_loader: 训练和验证数据加载器 + criterion: 损失函数 + optimizer: 优化器 + scheduler: 学习率调度器(可选) + num_epochs: 训练轮数 + device: 使用的设备 + save_dir: 模型保存目录 + + 返回: + history: 包含训练历史的字典 + """ + if device is None: + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + + start_time = time.time() + model = model.to(device) + + history = { + 'train_loss': [], 'train_acc': [], + 'val_loss': [], 'val_acc': [], + 'epoch_times': [] + } + + best_val_acc = 0.0 + + # 确保保存目录存在 + os.makedirs(save_dir, exist_ok=True) + + for epoch in range(num_epochs): + epoch_start = time.time() + print(f"Epoch {epoch+1}/{num_epochs}") + + # 训练阶段 + model.train() + train_loss = 0.0 + train_correct = 0 + train_total = 0 + + for inputs, labels in train_loader: + inputs, labels = inputs.to(device), labels.to(device) + + # 梯度清零 + optimizer.zero_grad() + + # 前向传播 + outputs = model(inputs) + loss = criterion(outputs, labels) + + # 反向传播和优化 + loss.backward() + optimizer.step() + + # 统计 + train_loss += loss.item() * inputs.size(0) + _, predicted = torch.max(outputs, 1) + train_total += labels.size(0) + train_correct += (predicted == labels).sum().item() + + # 计算训练指标 + train_loss = train_loss / len(train_loader.sampler) + train_acc = train_correct / train_total + + # 验证阶段 + model.eval() + val_loss = 0.0 + val_correct = 0 + val_total = 0 + + with torch.no_grad(): + for inputs, labels in valid_loader: + inputs, labels = inputs.to(device), labels.to(device) 
+ + # 前向传播 + outputs = model(inputs) + loss = criterion(outputs, labels) + + # 统计 + val_loss += loss.item() * inputs.size(0) + _, predicted = torch.max(outputs, 1) + val_total += labels.size(0) + val_correct += (predicted == labels).sum().item() + + # 计算验证指标 + val_loss = val_loss / len(valid_loader.sampler) + val_acc = val_correct / val_total + + # 更新学习率 + if scheduler: + scheduler.step() + + # 记录历史 + history['train_loss'].append(train_loss) + history['train_acc'].append(train_acc) + history['val_loss'].append(val_loss) + history['val_acc'].append(val_acc) + + # 记录每个epoch的时间 + epoch_end = time.time() + epoch_time = epoch_end - epoch_start + history['epoch_times'].append(epoch_time) + + # 如果是最佳模型,保存权重 + if val_acc > best_val_acc: + best_val_acc = val_acc + torch.save(model.state_dict(), f"{save_dir}/{model.__class__.__name__}_best.pth") + print(f"模型已保存到 {save_dir}/{model.__class__.__name__}_best.pth") + + print(f"训练损失: {train_loss:.4f}, 训练准确率: {train_acc:.4f}") + print(f"验证损失: {val_loss:.4f}, 验证准确率: {val_acc:.4f}") + print(f"本轮用时: {epoch_time:.2f}s") + print("-" * 50) + + # 计算总训练时间 + total_time = time.time() - start_time + print(f"总训练时间: {total_time:.2f}s") + + return model, history + +def evaluate_model(model, test_loader, criterion, device=None, classes=None): + """ + 评估模型在测试集上的性能 + + 参数: + model: 要评估的模型 + test_loader: 测试数据加载器 + criterion: 损失函数 + device: 使用的设备 + classes: 类别名称列表 + + 返回: + test_loss: 测试损失 + test_acc: 测试准确率 + """ + if device is None: + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + + model = model.to(device) + model.eval() + + test_loss = 0.0 + test_correct = 0 + test_total = 0 + + y_true = [] + y_pred = [] + + with torch.no_grad(): + for inputs, labels in test_loader: + inputs, labels = inputs.to(device), labels.to(device) + + # 前向传播 + outputs = model(inputs) + loss = criterion(outputs, labels) + + # 统计 + test_loss += loss.item() * inputs.size(0) + _, predicted = torch.max(outputs, 1) + test_total += labels.size(0) + 
test_correct += (predicted == labels).sum().item() + + # 收集真实标签和预测标签 + y_true.extend(labels.cpu().numpy()) + y_pred.extend(predicted.cpu().numpy()) + + # 计算测试指标 + test_loss = test_loss / len(test_loader.dataset) + test_acc = test_correct / test_total + + print(f"测试损失: {test_loss:.4f}, 测试准确率: {test_acc:.4f}") + + # 如果提供了类别名称,计算混淆矩阵 + if classes: + try: + from sklearn.metrics import confusion_matrix, classification_report + import seaborn as sns + + cm = confusion_matrix(y_true, y_pred) + plt.figure(figsize=(10, 8)) + sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=classes, yticklabels=classes) + plt.xlabel('Predicted') # 英文标签 + plt.ylabel('True') # 英文标签 + plt.title('Confusion Matrix') # 英文标题 + plt.show() + + # 打印分类报告 + print("分类报告:") + print(classification_report(y_true, y_pred, target_names=classes)) + except ImportError: + print("警告: 未安装sklearn或seaborn,无法生成混淆矩阵和分类报告") + + return test_loss, test_acc + +def plot_training_history(history, title="Training History"): + """ + 绘制训练历史曲线 + + 参数: + history: 包含训练历史的字典 + title: 图表标题 + """ + plt.figure(figsize=(12, 5)) + + # 绘制损失曲线 + plt.subplot(1, 2, 1) + plt.plot(history['train_loss'], label='Training Loss') # 英文标签 + plt.plot(history['val_loss'], label='Validation Loss') # 英文标签 + plt.xlabel('Epochs') # 英文标签 + plt.ylabel('Loss') # 英文标签 + plt.title('Loss Curves') # 英文标题 + plt.legend() + + # 绘制准确率曲线 + plt.subplot(1, 2, 2) + plt.plot(history['train_acc'], label='Training Accuracy') # 英文标签 + plt.plot(history['val_acc'], label='Validation Accuracy') # 英文标签 + plt.xlabel('Epochs') # 英文标签 + plt.ylabel('Accuracy') # 英文标签 + plt.title('Accuracy Curves') # 英文标题 + plt.legend() + + plt.suptitle(title) # 英文总标题 + plt.tight_layout() + plt.savefig(f"{title.replace(' ', '_')}.png") + plt.show() + +def visualize_model_predictions(model, test_loader, classes, device=None, num_images=25): + """ + 可视化模型预测 + + 参数: + model: 要评估的模型 + test_loader: 测试数据加载器 + classes: 类别名称列表 + device: 使用的设备 + num_images: 要显示的图像数量 + """ + if device is None: 
+ device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + + model = model.to(device) + model.eval() + + # 获取batch数据 + images, labels = next(iter(test_loader)) + + with torch.no_grad(): + outputs = model(images.to(device)) + _, preds = torch.max(outputs, 1) + + # 将预测和标签转换为CPU上的numpy数组 + preds = preds.cpu().numpy() + labels = labels.numpy() + + # 计算display_grid的尺寸 + grid_size = int(np.ceil(np.sqrt(num_images))) + fig, axes = plt.subplots(grid_size, grid_size, figsize=(15, 15)) + + for i, ax in enumerate(axes.flat): + if i < min(num_images, len(preds)): + img = images[i].numpy().transpose((1, 2, 0)) + # 反标准化 + mean = np.array([0.4914, 0.4822, 0.4465]) + std = np.array([0.2023, 0.1994, 0.2010]) + img = std * img + mean + img = np.clip(img, 0, 1) + + ax.imshow(img) + color = "green" if preds[i] == labels[i] else "red" + ax.set_title(f"Predicted: {classes[preds[i]]}\nTrue: {classes[labels[i]]}", color=color) # 英文标签 + ax.axis('off') + + plt.tight_layout() + plt.show() + +def visualize_conv_filters(model, layer_name='conv1'): + """ + 可视化卷积核 + + 参数: + model: 模型 + layer_name: 要可视化的卷积层名称 + """ + model.eval() + + # 获取指定层的权重 + for name, module in model.named_modules(): + if name == layer_name and isinstance(module, nn.Conv2d): + weights = module.weight.data.clone().cpu() + break + else: + print(f"未找到名为 {layer_name} 的卷积层") + return + + # 规范化权重以便可视化 + weights = weights - weights.min() + weights = weights / weights.max() + + # 绘制卷积核 + num_filters = min(16, weights.size(0)) + fig, axes = plt.subplots(4, 4, figsize=(10, 10)) + fig.suptitle(f'Conv Layer {layer_name} Filters') # 英文标题 + + for i, ax in enumerate(axes.flat): + if i < num_filters: + # 如果是3通道的卷积核,直接显示RGB + if weights.size(1) == 3: + ax.imshow(weights[i].permute(1, 2, 0)) + else: + # 如果不是3通道,只显示第一个通道 + ax.imshow(weights[i, 0], cmap='viridis') + ax.axis('off') + + plt.tight_layout() + plt.show() + +def model_complexity(model, input_size=(3, 32, 32), batch_size=128, device=None): + """ + 计算模型参数量和推理时间 + + 参数: 
+ model: 要评估的模型 + input_size: 输入尺寸 + batch_size: 批量大小 + device: 使用的设备 + + 返回: + num_params: 参数量 + inference_time: 每批次推理时间 + """ + if device is None: + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + + model = model.to(device) + model.eval() + + # 计算参数量 + num_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + + # 创建随机输入 + dummy_input = torch.randn(batch_size, *input_size).to(device) + + # 预热 + with torch.no_grad(): + for _ in range(10): + _ = model(dummy_input) + + # 计时 + start_time = time.time() + with torch.no_grad(): + for _ in range(100): + _ = model(dummy_input) + end_time = time.time() + + inference_time = (end_time - start_time) / 100 + + print(f"参数量: {num_params:,}") + print(f"每批次({batch_size}个样本)推理时间: {inference_time*1000:.2f}ms") + + return num_params, inference_time \ No newline at end of file diff --git a/Jianhai/lab5/实验指导.md b/Jianhai/lab5/实验指导.md new file mode 100644 index 0000000..1f33893 --- /dev/null +++ b/Jianhai/lab5/实验指导.md @@ -0,0 +1,305 @@ +# 深度学习模型实验指导:MLP与CNN模型对比分析 + +## 实验概述 + +本实验旨在通过对多层感知机(MLP)和卷积神经网络(CNN)的实现、训练和评估,帮助学生深入理解两种模型的结构特点、性能差异以及适用场景。学生将从基础模型开始,逐步探索更复杂的网络架构,最终通过对比分析,掌握深度学习模型设计与评估的关键技能。 + +本实验的代码已经可以稳定运行。作业内容包括补全两个模型定义代码(MLP与CNN)以及回答一系列问题。两个补全任务的代码仅需在实验报告中体现即可。 + + + +## 实验目的 + +1. 掌握MLP和CNN的基本原理和实现方法 +2. 了解不同网络结构对模型性能的影响 +3. 学习深度学习模型训练、评估和可视化的方法 +4. 通过对比实验,理解不同模型在图像分类任务中的优缺点 +5. 培养深度学习模型调优和问题解决的能力 + +## 实验准备 + +### 环境要求 + +- Python 3.6+ +- PyTorch 1.7+ +- NumPy, Matplotlib +- scikit-learn (用于评估) +- 建议使用GPU环境(可选) + +实验环境已经在mo平台中搭建好了,同学们无需自行配置 + +### 实验数据集 + +本实验使用CIFAR-10数据集,包含10个类别的彩色图像,每类6000张,共60000张32×32的图像。 + +### 项目结构 + +``` +项目根目录/ +├── models/ +│ ├── __init__.py +│ ├── mlp.py # MLP模型定义 +│ └── cnn.py # CNN模型定义 +├── utils/ +│ ├── __init__.py +│ ├── data_loader.py # 数据加载函数 +│ └── train_utils.py # 训练和评估函数 +├── train_all_notebook.py # 统一训练脚本 +└── compare_models.py # 模型比较脚本 +``` + +## 实验原理 + +### 多层感知机(MLP) + +多层感知机是一种前馈神经网络,由输入层、一个或多个隐藏层和输出层组成。MLP的主要特点是: + +1. 每层神经元与下一层全连接 +2. 
使用非线性激活函数(如ReLU、Sigmoid等) +3. 通过反向传播算法进行训练 + +**思考问题1**: MLP在处理图像数据时面临哪些挑战?请从数据结构、参数量和特征提取能力三个角度分析。 + + +### 卷积神经网络(CNN) + +卷积神经网络是为处理具有网格状拓扑结构的数据而设计的神经网络,主要包含卷积层、池化层和全连接层。CNN的主要特点是: + +1. 局部连接:每个神经元只与输入数据的一个局部区域连接 +2. 权重共享:同一特征图的所有神经元共享相同的权重 +3. 多层次特征提取:低层检测边缘等简单特征,高层组合这些特征形成更复杂的表示 + +**思考问题2**: CNN相比MLP在处理图像时具有哪些优势?解释卷积操作如何保留图像的空间信息。 + + +## 实验内容 + +### 第一部分:基础MLP模型 + +#### 1.1 了解MLP模型结构 + +查看`models/mlp.py`文件,理解三种MLP模型的结构: +- `SimpleMLP`: 单隐层MLP +- `DeepMLP`: 多隐层MLP,带有BatchNorm和Dropout +- `ResidualMLP`: 带有残差连接的MLP + +**任务1**: 在下面的代码块中,实现一个具有两个隐藏层的MLP模型。第一隐藏层有128个神经元,第二隐藏层有64个神经元,输出层对应10个类别。使用ReLU激活函数,并添加BatchNorm和Dropout(0.3)。 + +```python +import torch.nn as nn + +class TwoLayerMLP(nn.Module): + def __init__(self, input_dim=3*32*32): + super(TwoLayerMLP, self).__init__() + self.flatten = nn.Flatten() + # 使用nn.Linear, nn.BatchNorm1d, nn.ReLU和nn.Dropout实现两个隐藏层 + + def forward(self, x): + x = self.flatten(x) + # 实现前向传播 + return x +``` + +#### 1.2 训练和评估MLP模型 + +1. 在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'simple_mlp'`。 + +2. 观察训练过程中的损失和准确率变化,以及最终在测试集上的性能。 + + **分析问题1**: 训练过程中,损失和准确率曲线表现如何?是否出现过拟合或欠拟合?简要分析可能的原因。 + + +3. 修改参数尝试训练DeepMLP模型,将`model_type`设置为`'deep_mlp'`。 + + **分析问题2**: 对比SimpleMLP和DeepMLP的性能,增加网络深度对性能有何影响? 
+ + +### 第二部分:基础CNN模型 + +#### 2.1 了解CNN模型结构 + +查看`models/cnn.py`文件,理解不同CNN模型的结构: +- `SimpleCNN`: 简单的CNN,包含两个卷积层 +- `MediumCNN`: 中等复杂度的CNN,带有BatchNorm和Dropout +- `VGGStyleNet`: VGG风格的CNN,使用连续的3x3卷积 +- `SimpleResNet`: 简化的ResNet,包含残差连接 + +**任务2**: 修改下面的`SimpleCNN`代码,添加一个额外的卷积层和BatchNorm。新的卷积层应该在第二个池化层之后,卷积核数量为64,卷积核大小为3x3。 + +```python +class EnhancedCNN(nn.Module): + def __init__(self): + super(EnhancedCNN, self).__init__() + self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1) + self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1) + # 在这里添加一个新的卷积层、BatchNorm和相应的池化层 + self.pool = nn.MaxPool2d(2, 2) + self.flatten = nn.Flatten() + # 修改全连接层以适应新的特征图尺寸 + self.relu = nn.ReLU() + def forward(self, x): + # 实现包含新卷积层的前向传播 + return x +``` + +#### 2.2 训练和评估CNN模型 + +1. 在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'simple_cnn'`,并将`use_data_augmentation`设置为`True`。 + +2. 观察训练过程和卷积核可视化结果。 + + **分析问题3**: 卷积核可视化显示了什么模式?这些模式与图像中的哪些特征可能对应? + + +3. 继续训练MediumCNN模型,将`model_type`设置为`'medium_cnn'`。 + + **分析问题4**: CNN模型相比MLP在CIFAR-10上的性能有何不同?为什么会有这样的差异? + + + +### 第三部分:高级CNN架构探索 + +#### 3.1 VGG风格和ResNet风格网络架构 + +在本部分中,我们将探索两种影响深远的CNN架构:VGG和ResNet。通过理解这些经典架构的设计理念和特点,可以帮助我们设计更高效的神经网络。 + +##### 3.1.1 VGG架构特点 +VGG网络(由Visual Geometry Group开发)是一种非常简洁而有效的CNN架构,在2014年ImageNet挑战赛中取得了优异成绩。其主要特点包括: + +1. **简单统一的设计**:使用小尺寸(3×3)卷积核和2×2最大池化层 +2. **深度堆叠**:通过堆叠多个相同配置的卷积层增加网络深度 +3. **结构规整**:遵循"卷积层组-池化层"的模式,随着网络深入,特征图尺寸减小而通道数增加 + +在我们的实现中,`VGGStyleNet`采用了简化版的VGG设计理念,包含三个卷积块,每个块包含两个卷积层和一个池化层。 + +1. 在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'vgg_style'`,并将`use_data_augmentation`设置为`True`。 + +2. 观察网络的训练过程和性能。特别注意其收敛速度和最终准确率。 + +##### 3.1.2 ResNet架构及残差连接 + +ResNet(残差网络)由微软研究院的He等人在2015年提出,是解决"深度退化问题"的突破性架构。其核心创新是引入了残差连接(skip connection): + +1. **残差连接**:通过快捷连接(shortcut connection)将输入直接加到输出上,形成恒等映射路径 +2. **残差学习**:网络不再直接学习输入到输出的映射F(x),而是学习残差F(x)-x +3. **深度扩展**:残差连接有效缓解了梯度消失问题,使得训练非常深的网络成为可能 + +在我们的实现中,`SimpleResNet`使用了基本的残差块,每个残差块包含两个3×3的卷积层和一个跳跃连接。 + +1. 
在 `train.ipynb` 中训练SimpleMLP模型,确保将`model_type`设置为`'resnet'`,并将`use_data_augmentation`设置为`True`。 + +2. 观察网络的训练过程和性能,特别是深度对训练稳定性的影响。 + +##### 3.1.3 Bottleneck结构 + +在更深的ResNet变体中,常使用"瓶颈"(Bottleneck)结构来降低计算复杂度: + +- 使用1×1卷积降低通道数(降维) +- 使用3×3卷积进行特征提取 +- 再使用1×1卷积恢复通道数(升维) + +这种设计大幅减少参数量和计算量,同时保持或提高性能。 + +**思考问题3**: 分析Bottleneck结构的优势。为什么1×1卷积在深度CNN中如此重要?它如何帮助控制网络的参数量和计算复杂度? + + +**探索问题1**: 查看`models/cnn.py`中的`SimpleResNet`实现,分析残差连接是如何实现的。如果输入和输出通道数不匹配,代码是如何处理的? + + + +#### 3.2 模型复杂度分析 + +不同CNN架构在性能和效率之间存在权衡。现在我们将通过分析不同模型的参数量和推理时间来理解这种权衡。 + +1. 运行以下代码来分析各个模型的复杂度: + ```python + from models import SimpleMLP, DeepMLP, ResidualMLP, SimpleCNN, MediumCNN, VGGStyleNet, SimpleResNet + from utils import model_complexity + import torch + + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + + models = { + 'SimpleMLP': SimpleMLP(), + 'DeepMLP': DeepMLP(), + 'SimpleCNN': SimpleCNN(), + 'MediumCNN': MediumCNN(), + 'VGGStyleNet': VGGStyleNet(), + 'SimpleResNet': SimpleResNet() + } + + results = {} + for name, model in models.items(): + print(f"\n分析{name}复杂度:") + params, time = model_complexity(model, device=device) + results[name] = {'params': params, 'time': time} + ``` + +2. 记录并比较各个模型的参数量和推理时间。 + +**分析问题5**: VGG风格和ResNet风格网络的性能比较。残差连接带来了哪些优势? + +**分析问题6**: 参数量和推理时间如何影响模型的实用性?如何在性能和效率之间找到平衡? + + +#### 3.3 理解高级CNN设计理念 + +随着深度学习的发展,CNN架构设计也变得更加精细和高效。以下是一些重要的设计理念: + +1. **网络深度与宽度平衡**:更深的网络能学习更抽象的特征,但也更难训练;更宽的网络(更多通道)能捕获更多特征,但参数量增加 +2. **跳跃连接**:除了ResNet的残差连接,还有DenseNet的密集连接、U-Net的跨层连接等 +3. **特征增强**:注意力机制(如SENet的通道注意力)、特征融合等 +4. **高效卷积设计**:深度可分离卷积(MobileNet)、组卷积(ShuffleNet)等 + +**探索问题2**: 如果你要为移动设备设计一个CNN模型,应该考虑哪些因素来权衡性能和效率?请提出至少三条具体的设计原则。 + + +### 第四部分:模型比较与分析 + +运行 `compare.py` 来对比不同模型的性能: + +**综合分析**: 根据比较结果,分析不同类型模型(MLP和CNN)以及不同复杂度模型的性能差异。考虑以下几点: +1. 测试准确率 +2. 参数量 +3. 推理时间 +4. 训练收敛速度 +5. 过拟合/欠拟合情况 + + +## 创新探索任务(选做) + +选择下列一项或多项任务完成: + +1. **模型改进**:对任一模型进行修改和改进,提高其在CIFAR-10上的性能。 +2. **可视化分析**:设计更好的可视化方法来解释模型的决策过程。 +3. **迁移学习**:探索如何利用预训练模型提高CIFAR-10的分类性能。 +4. 
**对抗性样本**:生成对抗性样本,并研究不同模型对对抗性样本的鲁棒性。 +5. **自监督学习**:实现一个简单的自监督学习方法,并评估其效果。 + +## 实验报告要求 + +实验报告应包含以下内容: + +1. 实验目的和背景介绍 +2. 实验原理简述 +3. 实验过程描述 +4. 实现的代码(关键部分,包含详细注释) +5. 实验结果和分析(包括填写的所有分析问题和任务) +6. 创新探索任务的设计、实现和结果(如果选做) +7. 结论和思考 +8. 参考文献 + +## 评分标准 + +- 基础任务完成度:60% +- 分析问题深度和准确性:35% +- 创新探索任务:15% (bonus) +- 报告质量和表达清晰度:5% + +## 参考资料 + +1. LeCun, Y., Bengio, Y., & Hinton, G. (2015). Deep learning. Nature, 521(7553), 436-444. +2. He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. CVPR. +3. Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556. +4. PyTorch文档:https://pytorch.org/docs/stable/index.html +5. CS231n: Convolutional Neural Networks for Visual Recognition:https://cs231n.github.io/ \ No newline at end of file diff --git a/_OVERVIEW.md b/_OVERVIEW.md deleted file mode 100644 index b848eb0..0000000 --- a/_OVERVIEW.md +++ /dev/null @@ -1,5 +0,0 @@ -## 介绍 (Introduction) - -添加该项目的功能、使用场景和输入输出参数等相关信息。 - -You can describe the function, usage and parameters of the project. 
diff --git a/_README.ipynb b/_README.ipynb index 3e361e1..2a19098 100644 --- a/_README.ipynb +++ b/_README.ipynb @@ -135,7 +135,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2" + "version": "3.7.5" }, "pycharm": { "stem_cell": { diff --git a/checkpoints/DeepMLP_best.pth b/checkpoints/DeepMLP_best.pth new file mode 100644 index 0000000..22ac67a Binary files /dev/null and b/checkpoints/DeepMLP_best.pth differ diff --git a/checkpoints/SimpleMLP_best.pth b/checkpoints/SimpleMLP_best.pth new file mode 100644 index 0000000..3043b00 Binary files /dev/null and b/checkpoints/SimpleMLP_best.pth differ diff --git a/coding_here.ipynb b/coding_here.ipynb deleted file mode 100644 index b6865ee..0000000 --- a/coding_here.ipynb +++ /dev/null @@ -1,34 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print('Hello Mo!')" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/data/cifar-10-batches-py/batches.meta b/data/cifar-10-batches-py/batches.meta new file mode 100644 index 0000000..4467a6e Binary files /dev/null and b/data/cifar-10-batches-py/batches.meta differ diff --git a/data/cifar-10-batches-py/data_batch_1 b/data/cifar-10-batches-py/data_batch_1 new file mode 100644 index 0000000..ab404a5 Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_1 differ diff --git a/data/cifar-10-batches-py/data_batch_2 b/data/cifar-10-batches-py/data_batch_2 new file mode 100644 index 0000000..6bf1369 Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_2 
differ diff --git a/data/cifar-10-batches-py/data_batch_3 b/data/cifar-10-batches-py/data_batch_3 new file mode 100644 index 0000000..66a0d63 Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_3 differ diff --git a/data/cifar-10-batches-py/data_batch_4 b/data/cifar-10-batches-py/data_batch_4 new file mode 100644 index 0000000..cf8d03d Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_4 differ diff --git a/data/cifar-10-batches-py/data_batch_5 b/data/cifar-10-batches-py/data_batch_5 new file mode 100644 index 0000000..468b2aa Binary files /dev/null and b/data/cifar-10-batches-py/data_batch_5 differ diff --git a/data/cifar-10-batches-py/readme.html b/data/cifar-10-batches-py/readme.html new file mode 100644 index 0000000..e377ade --- /dev/null +++ b/data/cifar-10-batches-py/readme.html @@ -0,0 +1 @@ + diff --git a/data/cifar-10-batches-py/test_batch b/data/cifar-10-batches-py/test_batch new file mode 100644 index 0000000..3e03f1f Binary files /dev/null and b/data/cifar-10-batches-py/test_batch differ diff --git a/data/cifar-10-python.tar.gz b/data/cifar-10-python.tar.gz new file mode 100644 index 0000000..90c5365 Binary files /dev/null and b/data/cifar-10-python.tar.gz differ diff --git a/image/scikitlearn.jpg b/image/scikitlearn.jpg new file mode 100644 index 0000000..6366111 Binary files /dev/null and b/image/scikitlearn.jpg differ