4K_Nearest_Neighbours.py
import math
import sys

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Raw string avoids backslash-escape problems in the Windows path
sys.path.append(r"D:\Github\Machine-Learning-Basic-Codes")

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

from utils.visualize import *
class Skylark_KNeighborsClassifier():
    def __init__(self, k_neighbors):
        super().__init__()
        self.k_neighbors = k_neighbors

    def predict(self, X_test, X_train, y_train):
        y_predict = np.zeros(X_test.shape[0])
        for i in range(X_test.shape[0]):
            # Euclidean distances from the current test sample to every training sample
            distances = np.zeros((X_train.shape[0], 2))
            for j in range(X_train.shape[0]):
                dis = self.euclidean_distance(X_test[i], X_train[j])  # Euclidean distance
                label = y_train[j]  # class label of the j-th training sample
                distances[j] = [dis, label]
            # Sort by distance in ascending order with argsort() and keep the k nearest neighbours
            k_nearest_neighbors = distances[distances[:, 0].argsort(
            )][:self.k_neighbors]
            # Use np.bincount to count how often each class appears among the k nearest neighbours
            counts = np.bincount(k_nearest_neighbors[:, 1].astype('int'))
            # The predicted label is the most frequent class among the k nearest neighbours
            testLabel = counts.argmax()
            y_predict[i] = testLabel
        return np.array(y_predict)
    def euclidean_distance(self, x1, x2):
        """ Calculates the l2 distance between two vectors """
        distance = 0
        # Squared distance between each coordinate
        for i in range(len(x1)):
            distance += pow((x1[i] - x2[i]), 2)
        return math.sqrt(distance)
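
# Note (illustrative sketch, not part of the original code): the nested Python
# loops in predict() could be replaced by a single NumPy broadcasting step that
# computes all pairwise test-to-train distances at once, e.g.
#
#   dists = np.sqrt(((X_test[:, None, :] - X_train[None, :, :]) ** 2).sum(axis=2))
#   nearest_idx = np.argsort(dists, axis=1)[:, :k_neighbors]
#
# where nearest_idx[i] holds the indices of the k training samples closest to
# X_test[i]; the prediction is then the majority vote of y_train[nearest_idx[i]].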
if __name__ == '__main__':
    use_sklearn = False

    # Data Preprocessing
    dataset = pd.read_csv('./dataset/Social_Network_Ads.csv')
    X = dataset.iloc[:, [2, 3]].values
    Y = dataset.iloc[:, 4].values

    # Making Dataset
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.25, random_state=0)

    # Feature Scaling
    sc = StandardScaler()
    X_train = sc.fit_transform(X_train.astype(np.float64))
    X_test = sc.transform(X_test.astype(np.float64))

    if use_sklearn:
        from sklearn.neighbors import KNeighborsClassifier
        classifier = KNeighborsClassifier(
            n_neighbors=5, metric='minkowski', p=2)
        classifier.fit(X_train, Y_train)
        Y_pred = classifier.predict(X_test)
    else:
        classifier = Skylark_KNeighborsClassifier(
            k_neighbors=5
        )
        Y_pred = classifier.predict(X_test, X_train, Y_train)

    # Making the Confusion Matrix (ground truth vs. predictions)
    print_confusion_matrix(Y_test, Y_pred, clf_name='KNN')

    # Visualization TODO
    # visualization_clf(X_train, Y_train, classifier, clf_name='KNN', set_name='Training')
    # visualization_clf(X_test, Y_test, classifier, clf_name='KNN', set_name='Test')
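
    # Quick sanity check (illustrative addition, not part of the original script):
    # report plain accuracy alongside the confusion matrix.
    from sklearn.metrics import accuracy_score
    print('KNN accuracy: {:.3f}'.format(accuracy_score(Y_test, Y_pred)))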