{"id":4689,"date":"2022-07-10T18:12:17","date_gmt":"2022-07-10T10:12:17","guid":{"rendered":"http:\/\/139.9.1.231\/?p=4689"},"modified":"2022-08-28T10:14:29","modified_gmt":"2022-08-28T02:14:29","slug":"papaer_sum","status":"publish","type":"post","link":"http:\/\/139.9.1.231\/index.php\/2022\/07\/10\/papaer_sum\/","title":{"rendered":"\u8bba\u6587\u9605\u8bfb\u2014\u2014\u5408\u96c6(\u6301\u7eed\u66f4\u65b0)"},"content":{"rendered":"\n<h1>\u6587\u732e\u94fe\u63a5<\/h1>\n\n\n\n<h2><a href=\"https:\/\/github.com\/chenpaopao\/deep-learning-for-image-processing\/tree\/master\/article_link#%E5%9B%BE%E5%83%8F%E5%88%86%E7%B1%BBclassification\"><\/a>\u56fe\u50cf\u5206\u7c7b(Classification)<\/h2>\n\n\n\n<ul><li>LeNet&nbsp;<a href=\"http:\/\/yann.lecun.com\/exdb\/lenet\/index.html\">http:\/\/yann.lecun.com\/exdb\/lenet\/index.html<\/a><\/li><li>AlexNet&nbsp;<a href=\"http:\/\/papers.nips.cc\/paper\/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf\">http:\/\/papers.nips.cc\/paper\/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf<\/a><\/li><li>ZFNet(Visualizing and Understanding Convolutional Networks)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1311.2901\">https:\/\/arxiv.org\/abs\/1311.2901<\/a><\/li><li>VGG&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1409.1556\">https:\/\/arxiv.org\/abs\/1409.1556<\/a><\/li><li>GoogLeNet, Inceptionv1(Going deeper with convolutions)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1409.4842\">https:\/\/arxiv.org\/abs\/1409.4842<\/a><\/li><li>Batch Normalization&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1502.03167\">https:\/\/arxiv.org\/abs\/1502.03167<\/a><\/li><li>Inceptionv3(Rethinking the Inception Architecture for Computer Vision)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1512.00567\">https:\/\/arxiv.org\/abs\/1512.00567<\/a><\/li><li>Inceptionv4, Inception-ResNet&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1602.07261\">https:\/\/arxiv.org\/abs\/1602.07261<\/a><\/li><li>Xception(Deep Learning with Depthwise 
Separable Convolutions)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1610.02357\">https:\/\/arxiv.org\/abs\/1610.02357<\/a><\/li><li>ResNet&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1512.03385\">https:\/\/arxiv.org\/abs\/1512.03385<\/a><\/li><li>ResNeXt&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1611.05431\">https:\/\/arxiv.org\/abs\/1611.05431<\/a><\/li><li>DenseNet&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1608.06993\">https:\/\/arxiv.org\/abs\/1608.06993<\/a><\/li><li>NASNet-A(Learning Transferable Architectures for Scalable Image Recognition)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1707.07012\">https:\/\/arxiv.org\/abs\/1707.07012<\/a><\/li><li>SENet(Squeeze-and-Excitation Networks)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1709.01507\">https:\/\/arxiv.org\/abs\/1709.01507<\/a><\/li><li>MobileNet(v1)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1704.04861\">https:\/\/arxiv.org\/abs\/1704.04861<\/a><\/li><li>MobileNet(v2)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1801.04381\">https:\/\/arxiv.org\/abs\/1801.04381<\/a><\/li><li>MobileNet(v3)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1905.02244\">https:\/\/arxiv.org\/abs\/1905.02244<\/a><\/li><li>ShuffleNet(v1)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1707.01083\">https:\/\/arxiv.org\/abs\/1707.01083<\/a><\/li><li>ShuffleNet(v2)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1807.11164\">https:\/\/arxiv.org\/abs\/1807.11164<\/a><\/li><li>Bag of Tricks for Image Classification with Convolutional Neural Networks&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1812.01187\">https:\/\/arxiv.org\/abs\/1812.01187<\/a><\/li><li>EfficientNet(v1)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1905.11946\">https:\/\/arxiv.org\/abs\/1905.11946<\/a><\/li><li>EfficientNet(v2)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2104.00298\">https:\/\/arxiv.org\/abs\/2104.00298<\/a><\/li><li>CSPNet&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1911.11929\">https:\/\/arxiv.org\/abs\/1911.11929<\/a><\/li><li>RegNet&nbsp;<a 
href=\"https:\/\/arxiv.org\/abs\/2003.13678\">https:\/\/arxiv.org\/abs\/2003.13678<\/a><\/li><li>NFNets(High-Performance Large-Scale Image Recognition Without Normalization)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2102.06171\">https:\/\/arxiv.org\/abs\/2102.06171<\/a><\/li><li>Vision Transformer&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2010.11929\">https:\/\/arxiv.org\/abs\/2010.11929<\/a><\/li><li>DeiT(Training data-efficient image transformers )&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2012.12877\">https:\/\/arxiv.org\/abs\/2012.12877<\/a><\/li><li>Swin Transformer&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2103.14030\">https:\/\/arxiv.org\/abs\/2103.14030<\/a><\/li><li>Swin Transformer V2: Scaling Up Capacity and Resolution&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2111.09883\">https:\/\/arxiv.org\/abs\/2111.09883<\/a><\/li><li>BEiT: BERT Pre-Training of Image Transformers&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2106.08254\">https:\/\/arxiv.org\/abs\/2106.08254<\/a><\/li><li>MAE(Masked Autoencoders Are Scalable Vision Learners)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2111.06377\">https:\/\/arxiv.org\/abs\/2111.06377<\/a><\/li><li>ConvNeXt(A ConvNet for the 2020s)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2201.03545\">https:\/\/arxiv.org\/abs\/2201.03545<\/a><\/li><li>MobileViT(v1)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2110.02178\">https:\/\/arxiv.org\/abs\/2110.02178<\/a><\/li><li>MobileOne(An Improved One millisecond Mobile Backbone)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2206.04040\">https:\/\/arxiv.org\/abs\/2206.04040<\/a><\/li><\/ul>\n\n\n\n<h2><a href=\"https:\/\/github.com\/chenpaopao\/deep-learning-for-image-processing\/tree\/master\/article_link#%E7%9B%AE%E6%A0%87%E6%A3%80%E6%B5%8Bobject-detection\"><\/a>\u76ee\u6807\u68c0\u6d4b(Object Detection)<\/h2>\n\n\n\n<ul><li>R-CNN&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1311.2524\">https:\/\/arxiv.org\/abs\/1311.2524<\/a><\/li><li>Fast R-CNN&nbsp;<a 
href=\"https:\/\/arxiv.org\/abs\/1504.08083\">https:\/\/arxiv.org\/abs\/1504.08083<\/a><\/li><li>Faster R-CNN&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1506.01497\">https:\/\/arxiv.org\/abs\/1506.01497<\/a><\/li><li>Cascade R-CNN: Delving into High Quality Object Detection&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1712.00726\">https:\/\/arxiv.org\/abs\/1712.00726<\/a><\/li><li>Mask R-CNN&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1703.06870\">https:\/\/arxiv.org\/abs\/1703.06870<\/a><\/li><li>SSD&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1512.02325\">https:\/\/arxiv.org\/abs\/1512.02325<\/a><\/li><li>FPN(Feature Pyramid Networks for Object Detection)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1612.03144\">https:\/\/arxiv.org\/abs\/1612.03144<\/a><\/li><li>RetinaNet(Focal Loss for Dense Object Detection)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1708.02002\">https:\/\/arxiv.org\/abs\/1708.02002<\/a><\/li><li>Bag of Freebies for Training Object Detection Neural Networks&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1902.04103\">https:\/\/arxiv.org\/abs\/1902.04103<\/a><\/li><li>YOLOv1&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1506.02640\">https:\/\/arxiv.org\/abs\/1506.02640<\/a><\/li><li>YOLOv2&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1612.08242\">https:\/\/arxiv.org\/abs\/1612.08242<\/a><\/li><li>YOLOv3&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1804.02767\">https:\/\/arxiv.org\/abs\/1804.02767<\/a><\/li><li>YOLOv4&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2004.10934\">https:\/\/arxiv.org\/abs\/2004.10934<\/a><\/li><li>YOLOX(Exceeding YOLO Series in 2021)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2107.08430\">https:\/\/arxiv.org\/abs\/2107.08430<\/a><\/li><li>PP-YOLO&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2007.12099\">https:\/\/arxiv.org\/abs\/2007.12099<\/a><\/li><li>PP-YOLOv2&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2104.10419\">https:\/\/arxiv.org\/abs\/2104.10419<\/a><\/li><li>CornerNet&nbsp;<a 
href=\"https:\/\/arxiv.org\/abs\/1808.01244\">https:\/\/arxiv.org\/abs\/1808.01244<\/a><\/li><li>FCOS(Old)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1904.01355\">https:\/\/arxiv.org\/abs\/1904.01355<\/a><\/li><li>FCOS(New)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/2006.09214\">https:\/\/arxiv.org\/abs\/2006.09214<\/a><\/li><li>CenterNet&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1904.07850\">https:\/\/arxiv.org\/abs\/1904.07850<\/a><\/li><\/ul>\n\n\n\n<h2><a href=\"https:\/\/github.com\/chenpaopao\/deep-learning-for-image-processing\/tree\/master\/article_link#%E8%AF%AD%E4%B9%89%E5%88%86%E5%89%B2semantic-segmentation\"><\/a>\u8bed\u4e49\u5206\u5272(Semantic Segmentation)<\/h2>\n\n\n\n<ul><li>FCN(Fully Convolutional Networks for Semantic Segmentation)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1411.4038\">https:\/\/arxiv.org\/abs\/1411.4038<\/a><\/li><li>UNet(U-Net: Convolutional Networks for Biomedical Image Segmentation)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1505.04597\">https:\/\/arxiv.org\/abs\/1505.04597<\/a><\/li><li>DeepLabv1(Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected CRFs)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1412.7062\">https:\/\/arxiv.org\/abs\/1412.7062<\/a><\/li><li>DeepLabv2(Semantic Image Segmentation with Deep Convolutional Nets, Atrous Convolution, and Fully Connected CRFs)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1606.00915\">https:\/\/arxiv.org\/abs\/1606.00915<\/a><\/li><li>DeepLabv3(Rethinking Atrous Convolution for Semantic Image Segmentation)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1706.05587\">https:\/\/arxiv.org\/abs\/1706.05587<\/a><\/li><li>DeepLabv3+(Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1802.02611\">https:\/\/arxiv.org\/abs\/1802.02611<\/a><\/li><\/ul>\n\n\n\n<h2><a 
href=\"https:\/\/github.com\/chenpaopao\/deep-learning-for-image-processing\/tree\/master\/article_link#%E5%AE%9E%E4%BE%8B%E5%88%86%E5%89%B2instance-segmentation\"><\/a>\u5b9e\u4f8b\u5206\u5272(Instance Segmentation)<\/h2>\n\n\n\n<ul><li>Mask R-CNN&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1703.06870\">https:\/\/arxiv.org\/abs\/1703.06870<\/a><\/li><\/ul>\n\n\n\n<h2><a href=\"https:\/\/github.com\/chenpaopao\/deep-learning-for-image-processing\/tree\/master\/article_link#%E5%85%B3%E9%94%AE%E7%82%B9%E6%A3%80%E6%B5%8Bkeypoint-detection\"><\/a>\u5173\u952e\u70b9\u68c0\u6d4b(Keypoint Detection)<\/h2>\n\n\n\n<ul><li>HRNet(Deep High-Resolution Representation Learning for Human Pose Estimation)&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1902.09212\">https:\/\/arxiv.org\/abs\/1902.09212<\/a><\/li><\/ul>\n\n\n\n<h2><a href=\"https:\/\/github.com\/chenpaopao\/deep-learning-for-image-processing\/tree\/master\/article_link#%E8%87%AA%E7%84%B6%E8%AF%AD%E8%A8%80%E5%A4%84%E7%90%86\"><\/a>\u81ea\u7136\u8bed\u8a00\u5904\u7406<\/h2>\n\n\n\n<ul><li>Attention Is All You Need&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1706.03762\">https:\/\/arxiv.org\/abs\/1706.03762<\/a><\/li><\/ul>\n\n\n\n<h2><a href=\"https:\/\/github.com\/chenpaopao\/deep-learning-for-image-processing\/tree\/master\/article_link#others\"><\/a>Others<\/h2>\n\n\n\n<ul><li>Microsoft COCO: Common Objects in Context&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1405.0312\">https:\/\/arxiv.org\/abs\/1405.0312<\/a><\/li><li>The PASCAL Visual Object Classes Challenge: A Retrospective&nbsp;<a href=\"http:\/\/host.robots.ox.ac.uk\/pascal\/VOC\/pubs\/everingham15.pdf\">http:\/\/host.robots.ox.ac.uk\/pascal\/VOC\/pubs\/everingham15.pdf<\/a><\/li><li>Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization&nbsp;<a href=\"https:\/\/arxiv.org\/abs\/1610.02391\">https:\/\/arxiv.org\/abs\/1610.02391<\/a><\/li><\/ul>\n","protected":false},"excerpt":{"rendered":"<p>\u6587\u732e\u94fe\u63a5 
\u56fe\u50cf\u5206\u7c7b(Classification) LeNet&nbsp;http:\/\/yann.lecun. &hellip; <a href=\"http:\/\/139.9.1.231\/index.php\/2022\/07\/10\/papaer_sum\/\" class=\"more-link\">\u7ee7\u7eed\u9605\u8bfb<span class=\"screen-reader-text\">\u8bba\u6587\u9605\u8bfb\u2014\u2014\u5408\u96c6(\u6301\u7eed\u66f4\u65b0)<\/span><\/a><\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":[],"categories":[9],"tags":[],"_links":{"self":[{"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/posts\/4689"}],"collection":[{"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/comments?post=4689"}],"version-history":[{"count":2,"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/posts\/4689\/revisions"}],"predecessor-version":[{"id":4691,"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/posts\/4689\/revisions\/4691"}],"wp:attachment":[{"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/media?parent=4689"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/categories?post=4689"},{"taxonomy":"post_tag","embeddable":true,"href":"http:\/\/139.9.1.231\/index.php\/wp-json\/wp\/v2\/tags?post=4689"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}