Implementations of code commonly asked for in autumn-recruiting interviews, including the BatchNorm layer, convolution layer, pooling layer, cross-entropy, stochastic gradient descent, non-maximum suppression (NMS), and k-means clustering.
1. BatchNorm layer
import numpy as np

def batch_norm(outputs, gamma, beta, epsilon=1e-6, momentum=0.9, running_mean=0, running_var=1):
    '''
    :param outputs: [N, C, H, W]
    :param gamma: scale parameter
    :param beta: shift parameter
    :param epsilon: small constant added to the variance for numerical stability
    :return: normalized outputs, updated running_mean, updated running_var
    '''
    mean = np.mean(outputs, axis=(0, 2, 3), keepdims=True)  # [1, C, 1, 1]
    var = np.var(outputs, axis=(0, 2, 3), keepdims=True)    # [1, C, 1, 1]
    # for 2D inputs of shape [B, L], use axis=0 instead:
    # mean = np.mean(outputs, axis=0)
    # var = np.var(outputs, axis=0)
    # moving averages of mean and var, kept for test time
    running_mean = momentum * running_mean + (1 - momentum) * mean
    running_var = momentum * running_var + (1 - momentum) * var
    res = gamma * (outputs - mean) / np.sqrt(var + epsilon) + beta
    return res, running_mean, running_var

if __name__ == '__main__':
    outputs = np.random.random((16, 64, 8, 8))
    tmp, _, _ = batch_norm(outputs, 1, 1, 1e-6)
    # per-channel statistics of the normalized output should be ~0 mean, ~1 std
    mean = np.mean(tmp[:, 1, :, :])
    std = np.sqrt(np.var(tmp[:, 1, :, :]))
    print(mean, std)
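At test time the batch statistics are replaced by the accumulated running averages. Below is a minimal sketch of that inference path, assuming the running_mean and running_var returned by batch_norm above; the function name batch_norm_infer is my own and not part of the original listing.

def batch_norm_infer(outputs, gamma, beta, running_mean, running_var, epsilon=1e-6):
    # inference mode: normalize with the stored running statistics
    # rather than the current batch's mean and variance
    return gamma * (outputs - running_mean) / np.sqrt(running_var + epsilon) + beta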
2. Convolution layer
import numpy as np

def conv_forward_naive(x, w, b, conv_param):
    '''
    :param x: [N, C_in, H, W]
    :param w: [C_out, C_in, k1, k2]
    :param b: [C_out]
    :param conv_param:
        - 'stride': step of the sliding window
        - 'pad': the number of pixels that will be used to zero-pad the input
    :return:
        - 'out': (N, C_out, H', W')
        - 'cache': (x, w, b, conv_param)
    '''
    N, C_in, H, W = x.shape
    C_out, _, k1, k2 = w.shape
    stride, padding = conv_param['stride'], conv_param['pad']
    H_out = (H - k1 + 2 * padding) // stride + 1
    W_out = (W - k2 + 2 * padding) // stride + 1
    out = np.zeros((N, C_out, H_out, W_out))
    # zero-pad the input on the spatial dimensions
    x_pad = np.zeros((N, C_in, H + 2 * padding, W + 2 * padding))
    x_pad[:, :, padding:padding + H, padding:padding + W] = x
    for i in range(H_out):
        for j in range(W_out):
            # the receptive field that produces output position (i, j)
            x_pad_mask = x_pad[:, :, i * stride:i * stride + k1, j * stride:j * stride + k2]
            for c in range(C_out):
                out[:, c, i, j] = np.sum(x_pad_mask * w[c, :, :, :], axis=(1, 2, 3))
    out += b[None, :, None, None]
    cache = (x, w, b, conv_param)
    return out, cache
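A quick shape sanity check for conv_forward_naive, a minimal sketch with made-up sizes (not part of the original listing):

if __name__ == '__main__':
    x = np.random.random((2, 3, 8, 8))   # N=2, C_in=3, 8x8 input
    w = np.random.random((4, 3, 3, 3))   # C_out=4, 3x3 kernels
    b = np.zeros(4)
    out, _ = conv_forward_naive(x, w, b, {'stride': 1, 'pad': 1})
    print(out.shape)  # (2, 4, 8, 8): stride 1 with pad 1 preserves the spatial size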
3. Max pooling
import numpy as np

def maxpooling_forward(feature, kernel, stride):
    '''
    :param feature: [N, C, H, W]
    :param kernel: [k1, k2]
    :param stride: [s1, s2]
    :return: pooled feature map of shape [N, C, H_out, W_out]
    '''
    N, C, H, W = feature.shape
    k1, k2 = kernel
    s1, s2 = stride
    H_out = (H - k1) // s1 + 1
    W_out = (W - k2) // s2 + 1
    out = np.zeros((N, C, H_out, W_out))
    for i in range(H_out):
        for j in range(W_out):
            feature_mask = feature[:, :, i * s1:i * s1 + k1, j * s2:j * s2 + k2]
            out[:, :, i, j] = np.max(feature_mask, axis=(2, 3))  # note: reduce over the spatial axes (2, 3)
    return out
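A minimal usage sketch of maxpooling_forward (my addition), using a 2x2 window with stride 2:

if __name__ == '__main__':
    feature = np.random.random((2, 3, 8, 8))
    pooled = maxpooling_forward(feature, kernel=(2, 2), stride=(2, 2))
    print(pooled.shape)  # expected (2, 3, 4, 4)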
4. Cross entropy
import numpy as np

def cross_entropy(label, outputs, reduce=True):
    '''
    :param label: B x 1, class indices
    :param outputs: B x C, raw logits
    :return: loss (mean over the batch if reduce, else sum)
    '''
    loss_list = []
    for i in range(len(label)):
        y = label[i]
        output = outputs[i]
        # softmax probability of the true class
        sum_exp = np.sum([np.exp(k) for k in output])
        prob = np.exp(output[y]) / sum_exp
        loss_list.append(-np.log(prob))
    if reduce:
        return np.mean(loss_list)
    else:
        return np.sum(loss_list)

def softmax(t):
    return np.exp(t) / np.sum(np.exp(t), axis=1, keepdims=True)

def cross_entropy_2(y, y_, onehot=True, reduce=True):
    y = softmax(y)
    if not onehot:
        # convert class indices into one-hot vectors
        cates = y.shape
        y_ = np.eye(cates[-1])[y_]
    if reduce:
        return np.mean(-np.sum(y_ * np.log(y), axis=1))
    else:
        return np.sum(-np.sum(y_ * np.log(y), axis=1))

if __name__ == '__main__':
    outputs = [[0.5, 0.5], [0, 1], [1, 0]]
    label = [0, 0, 1]
    print(cross_entropy(label, outputs, True))
    print(cross_entropy_2(outputs, label, False))
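The softmax above can overflow when the logits are large. A common remedy, sketched below as my own addition (stable_softmax is not in the original), subtracts the per-row maximum before exponentiating; the result is mathematically identical:

def stable_softmax(t):
    t = np.asarray(t, dtype=float)
    # subtracting the row-wise max does not change softmax but keeps exp() from overflowing
    shifted = t - np.max(t, axis=1, keepdims=True)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=1, keepdims=True)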
5. SGD
import numpy as np
import random

class MYSGD:
    def __init__(self, training_data, epochs, batch_size, lr, model):
        self.training_data = training_data
        self.epochs = epochs
        self.batch_size = batch_size
        self.lr = lr
        self.weight = [...]  # placeholder: the model's weight matrices
        self.bias = [...]    # placeholder: the model's bias vectors

    def run(self):
        n = len(self.training_data)
        for j in range(self.epochs):
            # reshuffle and split the data into mini-batches every epoch
            random.shuffle(self.training_data)
            mini_batches = [self.training_data[k * self.batch_size: (k + 1) * self.batch_size]
                            for k in range(n // self.batch_size)]
            for mini_batch in mini_batches:
                self.update(mini_batch)

    def update(self, mini_batch):
        nabla_b = [np.zeros(b.shape) for b in self.bias]
        nabla_w = [np.zeros(w.shape) for w in self.weight]
        # accumulate gradients over the mini-batch
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # gradient descent step with the averaged gradients
        self.weight = [w - (self.lr / len(mini_batch)) * nw for w, nw in zip(self.weight, nabla_w)]
        self.bias = [b - (self.lr / len(mini_batch)) * nb for b, nb in zip(self.bias, nabla_b)]

    def backprop(self, x, y):
        # should return (nabla_b, nabla_w): per-sample gradients of the loss
        # with respect to the biases and weights
        raise NotImplementedError
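backprop is left abstract above because it depends on the model. As a hypothetical sketch only (none of these names appear in the original), here is what it might look like for a fully connected network with sigmoid activations and a squared-error loss, with x and y given as column vectors:

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def backprop_mlp(weights, biases, x, y):
    # forward pass, remembering pre-activations (zs) and activations
    activations, zs = [x], []
    a = x
    for w, b in zip(weights, biases):
        z = w @ a + b
        zs.append(z)
        a = sigmoid(z)
        activations.append(a)
    nabla_b = [np.zeros(b.shape) for b in biases]
    nabla_w = [np.zeros(w.shape) for w in weights]
    # output-layer error for squared-error loss: (a - y) * sigmoid'(z)
    delta = (activations[-1] - y) * sigmoid(zs[-1]) * (1 - sigmoid(zs[-1]))
    nabla_b[-1] = delta
    nabla_w[-1] = delta @ activations[-2].T
    # propagate the error backwards through the remaining layers
    for l in range(2, len(weights) + 1):
        delta = (weights[-l + 1].T @ delta) * sigmoid(zs[-l]) * (1 - sigmoid(zs[-l]))
        nabla_b[-l] = delta
        nabla_w[-l] = delta @ activations[-l - 1].T
    return nabla_b, nabla_w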
6. NMS
import numpy as np

def iou_calculate(bbox1, bbox2, mode='x1y1x2y2'):
    # my version
    x11, y11, x12, y12 = bbox1
    x21, y21, x22, y22 = bbox2
    # +1 follows the pixel-inclusive convention: a box from x1 to x2 covers x2-x1+1 pixels
    area1 = (y12 - y11 + 1) * (x12 - x11 + 1)
    area2 = (y22 - y21 + 1) * (x22 - x21 + 1)
    overlap = max(min(y12, y22) - max(y11, y21) + 1, 0) * max(min(x12, x22) - max(x11, x21) + 1, 0)
    return overlap / (area2 + area1 - overlap + 1e-6)

def bb_intersection_over_union(boxA, boxB):
    # reference version
    boxA = [int(x) for x in boxA]
    boxB = [int(x) for x in boxB]
    # coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    iou = interArea / float(boxAArea + boxBArea - interArea)
    return iou
def nms(outputs, scores, T):
    '''
    :param outputs: bboxes, x1y1x2y2
    :param scores: confidence of each bbox
    :param T: IoU threshold
    :return: kept bboxes and their scores, sorted by descending score
    '''
    # my version
    order = np.argsort(-np.array(scores))
    outputs = np.array(outputs)[order]
    scores = np.array(scores)[order]
    saved = [True for _ in range(outputs.shape[0])]
    for i in range(outputs.shape[0]):
        if saved[i]:
            for j in range(i + 1, outputs.shape[0]):
                # suppress any lower-scoring box that overlaps box i too much
                if saved[j] and iou_calculate(outputs[i], outputs[j]) >= T:
                    saved[j] = False
    return outputs[saved], scores[saved]
# reference version
def nms_others(bboxes, scores, iou_thresh):
    """
    :param bboxes: array of detection boxes, x1y1x2y2
    :param scores: array of confidences
    :param iou_thresh: IoU threshold
    :return: kept bboxes and their scores
    """
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    # result list of kept indices
    result = []
    index = scores.argsort()[::-1]  # sort boxes by confidence, high to low, and keep the indices
    # for safety, all operations below work on indices
    while index.size > 0:
        # loop while there are boxes left
        i = index[0]
        result.append(i)  # keep the box with the highest remaining confidence
        # IoU between this box and every other remaining box
        x11 = np.maximum(x1[i], x1[index[1:]])
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])
        w = np.maximum(0, x22 - x11 + 1)  # when two edges coincide, one row/column of pixels still overlaps
        h = np.maximum(0, y22 - y11 + 1)
        overlaps = w * h
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        # keep only the indices whose IoU is below the threshold
        idx = np.where(ious <= iou_thresh)[0]
        index = index[idx + 1]  # continue with the remaining boxes
    bboxes, scores = bboxes[result], scores[result]
    return bboxes, scores
def mynms(bboxes, scores, iou_T):
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    ids = np.argsort(scores)[::-1]
    res = []
    while len(ids) > 0:
        i = ids[0]
        res.append(i)
        x11 = np.maximum(x1[i], x1[ids[1:]])
        x22 = np.minimum(x2[i], x2[ids[1:]])
        y11 = np.maximum(y1[i], y1[ids[1:]])
        y22 = np.minimum(y2[i], y2[ids[1:]])
        # np.maximum(X, Y): element-wise maximum of X and Y
        overlap = np.maximum(x22 - x11 + 1, 0) * np.maximum(y22 - y11 + 1, 0)
        iou = overlap / (areas[i] + areas[ids[1:]] - overlap)
        ids = ids[1:][iou < iou_T]
    return bboxes[res], scores[res]
if __name__ == '__main__':
    outputs = [[10, 10, 20, 20], [15, 15, 25, 25], [9, 13, 25, 15]]
    scores = [0.6, 0.8, 0.7]
    T = 0.1
    print(nms(outputs, scores, T))
    print(nms_others(np.array(outputs), np.array(scores), T))
    print(mynms(np.array(outputs), np.array(scores), T))
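A hand-checkable IoU example (my addition, not in the original listing), useful for verifying the two IoU helpers against each other:

# two 10x10 boxes (inclusive-pixel convention) overlapping on a 5x10 strip:
# intersection = 50, union = 100 + 100 - 50 = 150, so IoU = 50/150 ≈ 0.333
boxA = [0, 0, 9, 9]
boxB = [5, 0, 14, 9]
print(iou_calculate(boxA, boxB))               # -> ~0.333
print(bb_intersection_over_union(boxA, boxB))  # -> ~0.333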
7. k-means
import numpy as np
import copy

def check(clusters_last, clusters_center):
    # convergence test: True when no center has moved since the last iteration
    if len(clusters_last) == 0:
        return False
    for c1, c2 in zip(clusters_last, clusters_center):
        if np.linalg.norm(c1 - c2) > 0:
            return False
    return True

def kMeans(data, k):
    '''
    :param data: [n, c]
    :param k: the number of clusters
    :return: cluster centers
    '''
    clusters_last = []
    clusters_center = [data[i] for i in range(k)]  # initialized with the first k points; random sampling also works
    while not check(clusters_last, clusters_center):
        clusters_last = copy.deepcopy(clusters_center)
        clusters = [[] for _ in range(k)]
        # assignment step: attach each point to its nearest center
        for i in range(data.shape[0]):
            min_dis = float('inf')
            for j, center in enumerate(clusters_center):
                distance = np.linalg.norm(center - data[i])
                if distance < min_dis:
                    min_dis = distance
                    idx = j
            clusters[idx].append(data[i])
        # update step: move each center to the mean of its assigned points
        clusters_center = []
        for i in range(k):
            clusters_center.append(np.mean(clusters[i], axis=0))
    return clusters_center

def kMeans2(data, k):
    '''
    :param data: [n, c]
    :param k: the number of clusters
    :return: cluster centers as an array of shape [k, c]
    '''
    clusters_last = []
    clusters_center = copy.deepcopy(data[:k])  # initialized with the first k points; random sampling also works
    while not check(clusters_last, clusters_center):
        clusters_last = copy.deepcopy(clusters_center)
        clusters = [[] for _ in range(k)]
        for i in range(data.shape[0]):
            # vectorized distance from point i to every center
            distance = np.linalg.norm(clusters_center - data[i], axis=1)
            idx = np.argmin(distance)
            clusters[idx].append(data[i])
        clusters_center = []
        for i in range(k):
            clusters_center.append(np.mean(clusters[i], axis=0))
        clusters_center = np.array(clusters_center)
    return clusters_center

if __name__ == '__main__':
    data = np.random.random((20, 2))
    print(kMeans(data, 5))
    print(kMeans2(data, 5))
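As a quick cross-check (my addition; it assumes scikit-learn is available, which the original does not use), the hand-rolled centers can be compared against sklearn's KMeans on the same data:

from sklearn.cluster import KMeans

data = np.random.random((20, 2))
ref = KMeans(n_clusters=5, n_init=10, random_state=0).fit(data)
print(ref.cluster_centers_)  # reference centers; ordering may differ from kMeans2(data, 5)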