@article{Shin_Jo_Wang_2023, title={A Novel Method for Fashion Clothing Image Classification Based on Deep Learning}, volume={22}, url={https://e-journal.uum.edu.my/index.php/jict/article/view/18162}, DOI={10.32890/jict2023.22.1.6}, abstractNote={Image recognition and classification is a significant research topic in computational vision and a widely used computer technology. The methods often used in image classification and recognition tasks are based on deep learning, such as Convolutional Neural Networks (CNNs), LeNet, and Long Short-Term Memory networks (LSTMs). Unfortunately, the classification accuracy of these methods is unsatisfactory. In recent years, large-scale deep learning networks such as VGG16 and Residual Network (ResNet) have been used to improve image recognition and classification accuracy. However, due to their deep network hierarchies and complex parameter settings, these models take more time in the training phase, especially when the sample number is small, which can easily lead to overfitting. This paper suggested a deep learning-based image classification technique built on a CNN model with improved convolutional and pooling layers. Furthermore, the study adopted an approximate dynamic learning rate update algorithm in model training to realize the learning rate's self-adaptation, ensure the model's rapid convergence, and shorten the training time. Using the proposed model, an experiment was conducted on the Fashion-MNIST dataset, taking 6,000 images as the training dataset and 1,000 images as the testing dataset. In actual experiments, the classification accuracy of the suggested method was 93 percent, 4.6 percent higher than that of the basic CNN model. Simultaneously, the study compared the influence of the batch size of model training on classification accuracy. Experimental outcomes showed this model generalizes well in fashion clothing image classification tasks.}, number={1}, journal={Journal of Information and Communication Technology}, author={Shin, Seong-Yoon and Jo, Gwanghyun and Wang, Guangxing}, year={2023}, month={Jan.}, pages={127–148} }