
A quick speed test of Keras vs. MXNet


Both are really easy to use, a good fit for a beginner like me.

Environment: Windows 10 64-bit, CUDA 8.0, cuDNN 5.1, GTX 1060

Task: a CNN on MNIST

    import os
    import gzip
    import struct
    import numpy

    def read_data(label_name, image_name):
        # MNIST gzip files are expected under %DATA%\MNIST
        data_dir = os.path.join(os.getenv('DATA'), 'MNIST')
        with gzip.open(os.path.join(data_dir, label_name), 'rb') as flbl:
            magic, num = struct.unpack(">II", flbl.read(8))
            label = numpy.fromstring(flbl.read(), dtype=numpy.int8)
        with gzip.open(os.path.join(data_dir, image_name), 'rb') as fimg:
            magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
            image = numpy.fromstring(fimg.read(), dtype=numpy.uint8).reshape(len(label), rows, cols)
        return (label, image)

    (train_lbl, train_img) = read_data('train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz')
    (val_lbl, val_img) = read_data('t10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz')

    def to4d(img):
        # reshape to NCHW and scale pixel values to [0, 1]
        return img.reshape(img.shape[0], 1, 28, 28).astype(numpy.float32) / 255

    def repack_data(d):
        # one-hot encode labels for categorical_crossentropy
        t = numpy.zeros((d.size, 10))
        for i in range(d.size):
            t[i][d[i]] = 1
        return t

    train_img = to4d(train_img)
    val_img = to4d(val_img)

    batch_size = 100
    num_epoch = 5

    #backend = 'mxnet'
    backend = 'keras'

    if backend == 'keras':
        from keras.models import Sequential
        from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense
        from keras.optimizers import Adadelta
        model = Sequential()
        model.add(Convolution2D(64, 5, 5, input_shape=(1, 28, 28), init='uniform', activation='relu'))
        model.add(MaxPooling2D())
        model.add(Convolution2D(128, 5, 5, init='uniform', activation='relu'))
        model.add(MaxPooling2D())
        model.add(Flatten())
        model.add(Dense(1024, init='uniform', activation='relu'))
        model.add(Dense(1024, init='uniform', activation='relu'))
        model.add(Dense(10, init='uniform', activation='softmax'))
        model.summary()
        model.compile(loss='categorical_crossentropy', optimizer=Adadelta(), metrics=['accuracy'])
        model.fit(train_img, repack_data(train_lbl),
                  batch_size=batch_size, nb_epoch=num_epoch,
                  validation_data=(val_img, repack_data(val_lbl)))
    else:
        import mxnet
        train_iter = mxnet.io.NDArrayIter(train_img, train_lbl, batch_size, shuffle=True)
        val_iter = mxnet.io.NDArrayIter(val_img, val_lbl, batch_size)
        # same architecture as the Keras branch, built as an mxnet symbol graph
        data = mxnet.symbol.Variable('data')
        conv1 = mxnet.sym.Convolution(data=data, kernel=(5, 5), num_filter=64)
        relu1 = mxnet.sym.Activation(data=conv1, act_type="relu")
        pool1 = mxnet.sym.Pooling(data=relu1, pool_type="max", kernel=(2, 2), stride=(2, 2))
        conv2 = mxnet.sym.Convolution(data=pool1, kernel=(5, 5), num_filter=128)
        relu2 = mxnet.sym.Activation(data=conv2, act_type="relu")
        pool2 = mxnet.sym.Pooling(data=relu2, pool_type="max", kernel=(2, 2), stride=(2, 2))
        flatten = mxnet.sym.Flatten(data=pool2)
        fc1 = mxnet.symbol.FullyConnected(data=flatten, num_hidden=1024)
        relu3 = mxnet.sym.Activation(data=fc1, act_type="relu")
        fc2 = mxnet.symbol.FullyConnected(data=relu3, num_hidden=1024)
        relu4 = mxnet.sym.Activation(data=fc2, act_type="relu")
        fc3 = mxnet.sym.FullyConnected(data=relu4, num_hidden=10)
        net = mxnet.sym.SoftmaxOutput(data=fc3, name='softmax')
        mxnet.viz.plot_network(symbol=net, shape={"data": (batch_size, 1, 28, 28)}).render('mxnet')
        model = mxnet.model.FeedForward(
            ctx=mxnet.gpu(0),           # train on GPU 0
            symbol=net,
            num_epoch=num_epoch,
            learning_rate=0.1,
            optimizer='AdaDelta',
            initializer=mxnet.initializer.Uniform())
        import logging
        logging.getLogger().setLevel(logging.DEBUG)  # show Speedometer output
        model.fit(
            X=train_iter,
            eval_data=val_iter,
            batch_end_callback=mxnet.callback.Speedometer(batch_size, 200))
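The loader above assumes the four MNIST gzip files already sit under %DATA%\MNIST. If they are missing, something along these lines would fetch them first; this is a sketch I added, and the mirror URL and Python 3 urllib.request call are assumptions, not part of the original post:

    import os
    import urllib.request

    base_url = 'http://yann.lecun.com/exdb/mnist/'  # assumed mirror for the MNIST files
    files = ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
             't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']

    target = os.path.join(os.getenv('DATA', '.'), 'MNIST')
    os.makedirs(target, exist_ok=True)

    for name in files:
        path = os.path.join(target, name)
        if not os.path.exists(path):  # skip files that are already present
            urllib.request.urlretrieve(base_url + name, path)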

    Layer (type)                     Output Shape         Param #    Connected to
    ==============================================================================
    convolution2d_1 (Convolution2D)  (None, 64, 24, 24)   1664       convolution2d_input_1[0][0]
    maxpooling2d_1 (MaxPooling2D)    (None, 64, 12, 12)   0          convolution2d_1[0][0]
    convolution2d_2 (Convolution2D)  (None, 128, 8, 8)    204928     maxpooling2d_1[0][0]
    maxpooling2d_2 (MaxPooling2D)    (None, 128, 4, 4)    0          convolution2d_2[0][0]
    flatten_1 (Flatten)              (None, 2048)         0          maxpooling2d_2[0][0]
    dense_1 (Dense)                  (None, 1024)         2098176    flatten_1[0][0]
    dense_2 (Dense)                  (None, 1024)         1049600    dense_1[0][0]
    dense_3 (Dense)                  (None, 10)           10250      dense_2[0][0]
    ==============================================================================
    Total params: 3364618
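Those parameter counts check out by hand: a convolution contributes kernel_height * kernel_width * input_channels weights plus one bias per filter, and a dense layer contributes (inputs + 1) * outputs. A quick arithmetic check that reproduces the summary:

    # conv layers: (kh * kw * in_channels + 1 bias) * out_channels
    conv1 = (5 * 5 * 1 + 1) * 64        # 1664
    conv2 = (5 * 5 * 64 + 1) * 128      # 204928
    # dense layers: (in_features + 1 bias) * out_features
    dense1 = (2048 + 1) * 1024          # 2098176
    dense2 = (1024 + 1) * 1024          # 1049600
    dense3 = (1024 + 1) * 10            # 10250
    print(conv1 + conv2 + dense1 + dense2 + dense3)  # 3364618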


keras+theano

Training used the 60,000-sample training set, with validation on the 10,000-sample test set.
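As a side note, per-epoch wall time on the Keras side can be logged with a tiny callback. This is a sketch I added for completeness; the EpochTimer name is mine and was not part of the original run:

    import time
    from keras.callbacks import Callback

    class EpochTimer(Callback):
        """Record wall-clock seconds for each epoch."""
        def on_epoch_begin(self, epoch, logs=None):
            self.t0 = time.time()
        def on_epoch_end(self, epoch, logs=None):
            print('epoch %d took %.1f s' % (epoch, time.time() - self.t0))

    # used via: model.fit(..., callbacks=[EpochTimer()])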

mxnet

    INFO:root:Start training with [gpu(0)]
    INFO:root:Epoch[0] Batch [200]  Speed: 2960.54 samples/sec  Train-accuracy=0.8456
    INFO:root:Epoch[1] Batch [200]  Speed: 2878.78 samples/sec  Train-accuracy=0.9751
    INFO:root:Epoch[1] Validation-accuracy=0.9867
    (remaining epochs omitted)

Keras's speed is fine by me, and it runs quite smoothly. It basically hits the performance level you'd expect from this class of hardware, and GPU utilization is steady, staying close to full load most of the time.

But the Theano backend compiles so, so, so slowly!

MXNet, however, runs slowly, taking about three times as long. Even on the official example it comes in at roughly half the speed reported for a GTX 980, so I wonder whether something in my configuration is wrong.

That said, I noticed the CPU sits at 100% the whole time MXNet is training, which may be the reason...
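If CPU-side work really is the bottleneck, one thing to try is capping the thread counts MXNet uses; it reads these from environment variables at import time. A sketch under that assumption, where the particular values are guesses to experiment with, not a known fix:

    import os

    # Both variables must be set before mxnet is imported.
    os.environ['OMP_NUM_THREADS'] = '4'            # cap OpenMP threads used by CPU operators
    os.environ['MXNET_CPU_WORKER_NTHREADS'] = '2'  # CPU worker threads for the execution engine

    import mxnet  # now picks up the settings above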

A sad story.
