In [1]:
# sec: lib — library imports
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
# sec: ver — print Chainer/NumPy runtime info for reproducibility
chainer.print_runtime_info()
In [2]:
class MyConvNet(chainer.Chain):
    """Small CNN: two conv layers followed by three fully connected layers.

    Every layer is created with in_size/in_channels = None, so input sizes
    are inferred lazily on the first forward pass.
    """

    def __init__(self):
        super().__init__()
        # Register the layers that hold learnable parameters.
        with self.init_scope():
            self.c1 = L.Convolution2D(None, 20, ksize=3, stride=2, pad=0)
            self.c2 = L.Convolution2D(None, 40, ksize=3, stride=2, pad=0)
            self.l1 = L.Linear(None, 100)
            self.l2 = L.Linear(None, 100)
            self.l3 = L.Linear(None, 40)

    def forward(self, x):
        """Forward pass; prints each intermediate shape for inspection."""
        print("x", x.shape, np.prod(x.shape[1:]))
        # Convolutional feature extractor.
        out = F.relu(self.c1(x))
        print("conv1", out.shape, np.prod(out.shape[1:]))
        out = F.relu(self.c2(out))
        print("conv2", out.shape, np.prod(out.shape[1:]))
        # Fully connected head (L.Linear flattens its input itself).
        out = F.relu(self.l1(out))
        print("fc1", out.shape)
        out = F.relu(self.l2(out))
        print("fc2", out.shape)
        out = self.l3(out)
        print("fc3", out.shape)
        return out
In [3]:
n_batch = 128   # batch size
n_ch = 3        # input channels
n_height = 20
n_width = 120
# sec: compute the NN model's output
# NOTE(review): np.zeros defaults to float64, while Chainer links expect
# float32 input — this cell is the failing demonstration; the next cell
# repeats it with an explicit dtype="float32".
model = MyConvNet()
x = np.zeros((n_batch, n_ch, n_height, n_width))
y = model(x)
print("y", y.shape)
In [4]:
# sec: compute the NN model's output
# Dummy batch dimensions.
n_batch, n_ch = 128, 3
n_height, n_width = 20, 120

model = MyConvNet()
# All-zero dummy batch; float32 because Chainer links expect it.
batch_shape = (n_batch, n_ch, n_height, n_width)
x = np.zeros(batch_shape, dtype="float32")
y = model(x)
print("y", y.shape)
In [5]:
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
# Print Chainer/NumPy runtime info for reproducibility.
chainer.print_runtime_info()
In [6]:
class MyConvNet(chainer.Chain):
    """Two 3x3/stride-2 conv layers plus a three-layer fully connected head.

    All layers use lazy size inference (first argument None), so the input
    channel count and spatial size are fixed on the first call.
    """

    def __init__(self):
        super().__init__()
        # Parameterized layers must be registered inside init_scope().
        with self.init_scope():
            self.c1 = L.Convolution2D(None, 20, ksize=3, stride=2, pad=0)
            self.c2 = L.Convolution2D(None, 40, ksize=3, stride=2, pad=0)
            self.l1 = L.Linear(None, 100)
            self.l2 = L.Linear(None, 100)
            self.l3 = L.Linear(None, 40)

    def forward(self, x):
        """Run the network, printing the shape after every stage."""
        print("x", x.shape, np.prod(x.shape[1:]))
        # sec: conv
        out = F.relu(self.c1(x))
        print("conv1", out.shape, np.prod(out.shape[1:]))
        out = F.relu(self.c2(out))
        print("conv2", out.shape, np.prod(out.shape[1:]))
        # sec: fc — L.Linear flattens the conv feature map automatically.
        out = F.relu(self.l1(out))
        print("fc1", out.shape)
        out = F.relu(self.l2(out))
        print("fc2", out.shape)
        out = self.l3(out)
        print("fc3", out.shape)
        return out
In [7]:
# sec: compute the NN model's output
# Single-channel dummy batch this time.
n_batch, n_ch = 128, 1
n_height, n_width = 20, 120

model = MyConvNet()
# float32 is required by Chainer links.
x = np.zeros((n_batch, n_ch, n_height, n_width), dtype="float32")
y = model(x)
print("y", y.shape)
In [8]:
# sec: compute gradients
# NOTE(review): np.zeros defaults to float64, but Chainer's
# softmax_cross_entropy expects integer (int32) class labels — this cell
# is the failing demonstration, repeated below with dtype="int32".
loss = F.softmax_cross_entropy(y, np.zeros(n_batch))
print(loss)
model.cleargrads()
loss.backward()
In [10]:
# sec: compute gradients
# Class labels must be int32 for softmax_cross_entropy; all-zero here.
labels = np.zeros(n_batch, dtype="int32")
loss = F.softmax_cross_entropy(y, labels)
print(loss)
model.cleargrads()
loss.backward()
In [5]:
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
# Print Chainer/NumPy runtime info for reproducibility.
chainer.print_runtime_info()
In [21]:
class MyConvNet(chainer.Chain):
    """CNN variant with larger, non-overlapping convolutions (ksize=5,
    stride=5) followed by a three-layer fully connected head.

    Layer input sizes are inferred lazily (first argument None).
    """

    def __init__(self):
        super().__init__()
        # Register the parameterized layers.
        with self.init_scope():
            self.c1 = L.Convolution2D(None, 20, ksize=5, stride=5, pad=0)
            self.c2 = L.Convolution2D(None, 40, ksize=5, stride=5, pad=0)
            self.l1 = L.Linear(None, 100)
            self.l2 = L.Linear(None, 100)
            self.l3 = L.Linear(None, 40)

    def forward(self, x):
        """Run the network, printing the shape after every stage."""
        print("x", x.shape, np.prod(x.shape[1:]))
        # sec: conv
        out = F.relu(self.c1(x))
        print("conv1", out.shape, np.prod(out.shape[1:]))
        out = F.relu(self.c2(out))
        print("conv2", out.shape, np.prod(out.shape[1:]))
        # sec: fc — L.Linear flattens the conv feature map automatically.
        out = F.relu(self.l1(out))
        print("fc1", out.shape)
        out = F.relu(self.l2(out))
        print("fc2", out.shape)
        out = self.l3(out)
        print("fc3", out.shape)
        return out
In [22]:
# sec: compute the NN model's output
# Single-channel dummy batch.
n_batch, n_ch = 128, 1
n_height, n_width = 20, 120

model = MyConvNet()
batch_shape = (n_batch, n_ch, n_height, n_width)
# float32 is required by Chainer links.
x = np.zeros(batch_shape, dtype="float32")
y = model(x)
print("y", y.shape)
In [19]:
class MyConvNet(chainer.Chain):
    """CNN variant with 5x5 kernels and stride 2 (overlapping windows),
    followed by a three-layer fully connected head.

    Layer input sizes are inferred lazily (first argument None).
    """

    def __init__(self):
        super().__init__()
        # Register the parameterized layers.
        with self.init_scope():
            self.c1 = L.Convolution2D(None, 20, ksize=5, stride=2, pad=0)
            self.c2 = L.Convolution2D(None, 40, ksize=5, stride=2, pad=0)
            self.l1 = L.Linear(None, 100)
            self.l2 = L.Linear(None, 100)
            self.l3 = L.Linear(None, 40)

    def forward(self, x):
        """Run the network, printing the shape after every stage."""
        print("x", x.shape, np.prod(x.shape[1:]))
        # sec: conv
        out = F.relu(self.c1(x))
        print("conv1", out.shape, np.prod(out.shape[1:]))
        out = F.relu(self.c2(out))
        print("conv2", out.shape, np.prod(out.shape[1:]))
        # sec: fc — L.Linear flattens the conv feature map automatically.
        out = F.relu(self.l1(out))
        print("fc1", out.shape)
        out = F.relu(self.l2(out))
        print("fc2", out.shape)
        out = self.l3(out)
        print("fc3", out.shape)
        return out
In [20]:
# sec: compute the NN model's output
# Single-channel dummy batch.
n_batch, n_ch = 128, 1
n_height, n_width = 20, 120

model = MyConvNet()
# float32 is required by Chainer links.
x = np.zeros((n_batch, n_ch, n_height, n_width), dtype="float32")
y = model(x)
print("y", y.shape)
In [23]:
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
# Print Chainer/NumPy runtime info for reproducibility.
chainer.print_runtime_info()
In [24]:
class MyConvNet(chainer.Chain):
    """Final CNN variant: two 3x3/stride-2 conv layers plus a three-layer
    fully connected head ending in 40 output units (one per class).

    Layer input sizes are inferred lazily (first argument None).
    """

    def __init__(self):
        super().__init__()
        # Register the parameterized layers.
        with self.init_scope():
            self.c1 = L.Convolution2D(None, 20, ksize=3, stride=2, pad=0)
            self.c2 = L.Convolution2D(None, 40, ksize=3, stride=2, pad=0)
            self.l1 = L.Linear(None, 100)
            self.l2 = L.Linear(None, 100)
            self.l3 = L.Linear(None, 40)

    def forward(self, x):
        """Run the network, printing the shape after every stage."""
        print("x", x.shape, np.prod(x.shape[1:]))
        # sec: conv
        out = F.relu(self.c1(x))
        print("conv1", out.shape, np.prod(out.shape[1:]))
        out = F.relu(self.c2(out))
        print("conv2", out.shape, np.prod(out.shape[1:]))
        # sec: fc — L.Linear flattens the conv feature map automatically.
        out = F.relu(self.l1(out))
        print("fc1", out.shape)
        out = F.relu(self.l2(out))
        print("fc2", out.shape)
        out = self.l3(out)
        print("fc3", out.shape)
        return out
In [25]:
# sec: compute the NN model's output
# Single-channel dummy batch.
n_batch, n_ch = 128, 1
n_height, n_width = 20, 120

model = MyConvNet()
batch_shape = (n_batch, n_ch, n_height, n_width)
# float32 is required by Chainer links.
x = np.zeros(batch_shape, dtype="float32")
y = model(x)
print("y", y.shape)
In [26]:
# sec: compute gradients
# NOTE(review): labels are np.arange(0, 128), but the model only has 40
# output classes, so labels 40..127 are out of range — this cell is the
# failing demonstration; the next cell maps them into [0, 40) with `% 40`.
loss = F.softmax_cross_entropy(y, np.arange(0, n_batch, dtype="int32"))
print(loss)
model.cleargrads()
loss.backward()
In [27]:
# sec: compute gradients
# Fold labels 0..127 into the model's 40 output classes via modulo.
labels = np.arange(0, n_batch, dtype="int32") % 40
loss = F.softmax_cross_entropy(y, labels)
print(loss)
model.cleargrads()
loss.backward()
コメント
コメントを投稿