Top 10 Data Mining Algorithms (2): The C4.5 Algorithm
Understanding the Basic Structure of a Decision Tree

Internal node: represents a test on a feature (attribute); conventionally drawn as a rectangle.
Leaf node: represents a class; conventionally drawn as an ellipse.
The topmost node is the root of the tree.
So how do we choose the feature to split on? At each split, which feature separates the samples fastest and most accurately? Different decision-tree algorithms use different selection criteria: ID3 uses information gain, C4.5 uses the gain ratio, and CART uses the Gini index.
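For reference, with D the current dataset, A a candidate feature whose values v split D into subsets D_v, and p_k the proportion of class k, the three criteria are:
Information gain: Gain(D, A) = H(D) - Σ_v (|D_v| / |D|) · H(D_v)
Gain ratio: GainRatio(D, A) = Gain(D, A) / SplitInfo_A(D)
Gini index: Gini(D) = 1 - Σ_k p_k²
The first two are developed in code below.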
Let's look at a concrete example, the loan-application dataset (shown as a table in the original post). The features are encoded as follows:

Age: 0 = young, 1 = middle-aged, 2 = elderly;
Has job: 0 = no, 1 = yes;
Owns house: 0 = no, 1 = yes;
Credit rating: 0 = fair, 1 = good, 2 = excellent;
Class (grant the loan?): 'no' = reject, 'yes' = approve.
Computing the Information Entropy
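Entropy measures the impurity of a dataset D: if class k makes up a fraction p_k of the samples, then H(D) = -Σ_k p_k · log2(p_k). The function below counts class frequencies from the last column (the label) of each row and applies this formula.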
from math import log

def create_data():
    # loan-application dataset: [age, has job, owns house, credit rating, class]
    dataSet = [[0, 0, 0, 0, 'no'],
               [0, 0, 0, 1, 'no'],
               [0, 1, 0, 1, 'yes'],
               [0, 1, 1, 0, 'yes'],
               [0, 0, 0, 0, 'no'],
               [1, 0, 0, 0, 'no'],
               [1, 0, 0, 1, 'no'],
               [1, 1, 1, 1, 'yes'],
               [1, 0, 1, 2, 'yes'],
               [1, 0, 1, 2, 'yes'],
               [2, 0, 1, 2, 'yes'],
               [2, 0, 1, 1, 'yes'],
               [2, 1, 0, 1, 'yes'],
               [2, 1, 0, 2, 'yes']]
    features = ['age', 'has job', 'owns house', 'credit rating']
    return dataSet, features

def entropy(dataset):
    num_data = len(dataset)
    class_count = {}
    for row in dataset:
        label = row[-1]                      # the class label is the last column
        class_count[label] = class_count.get(label, 0) + 1
    ent = 0.0
    for key in class_count:
        prob = class_count[key] / float(num_data)
        ent -= prob * log(prob, 2)           # H(D) = -sum(p * log2 p)
    return ent
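A quick sanity check, assuming the create_data and entropy functions above: the loan data contains 9 'yes' and 5 'no' samples out of 14, so its entropy should be -(9/14)·log2(9/14) - (5/14)·log2(5/14) ≈ 0.9403:

my_data, my_features = create_data()
print(entropy(my_data))  # -> approximately 0.9403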
Splitting the Dataset and Computing Information Gain
# Note: each row in the returned new_data has one feature fewer than mydata!
def split_dataset(mydata, axis, value):
    new_data = []
    for row in mydata:
        if row[axis] == value:
            reduced = row[:axis]             # drop the column we split on
            reduced.extend(row[axis+1:])
            new_data.append(reduced)
    return new_data
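For example, splitting on feature 0 (age) with value 0 keeps the five 'young' rows and drops the age column:

my_data, my_features = create_data()
print(split_dataset(my_data, 0, 0))
# -> [[0, 0, 0, 'no'], [0, 0, 1, 'no'], [1, 0, 1, 'yes'], [1, 1, 0, 'yes'], [0, 0, 0, 'no']]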
def choose_best_feature(dataset):
    '''
    Intuition: higher entropy means more uncertainty. Splitting always lowers the
    weighted average entropy, and the feature that lowers it the most is the best.
    :param dataset:
    :return: the index of the best feature
    '''
    num_feature = len(dataset[0]) - 1
    num_dataset = len(dataset)
    old_ent = entropy(dataset)
    best_gain = 0.0
    best_feature = -1
    for feature in range(num_feature):
        # collect all values taken by the i-th feature
        feature_list = [example[feature] for example in dataset]
        feature_set = set(feature_list)
        new_ent = 0.0
        for value in feature_set:
            ret = split_dataset(dataset, feature, value)
            prob = len(ret) / float(num_dataset)
            new_ent += prob * entropy(ret)   # each subset's entropy, weighted by its share of the samples
        now_gain = old_ent - new_ent
        print('Information gain of feature {}: {}'.format(feature, now_gain))
        if now_gain > best_gain:
            best_gain = now_gain
            best_feature = feature
    print('Index of the best feature: {}'.format(best_feature))
    return best_feature
Output:
Information gain of feature 0: 0.2467498197744391
Information gain of feature 1: 0.30316563448891654
Information gain of feature 2: 0.3948950998563653
Information gain of feature 3: 0.31493685137324035
Index of the best feature: 2
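A quick hand check of feature 2 ('owns house'): the 6 house-owners are all 'yes' (entropy 0), while the 8 non-owners split 3 'yes' / 5 'no' (entropy ≈ 0.9544), so the gain is 0.9403 - (8/14)·0.9544 ≈ 0.3949, matching the output above.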
The selection rule above is exactly the famous ID3 algorithm; next we turn it into C4.5. Raw information gain is biased toward features with many distinct values, so C4.5 normalizes it:

gain ratio = information gain / split information

where the split information is the entropy of the feature's own value distribution: SplitInfo_A(D) = -Σ_v (|D_v| / |D|) · log2(|D_v| / |D|).
# Note the change in the selection rule; the entropy function is also generalized
# (it now takes the index of the column whose distribution it measures; see the full listing below)
def choose_best_feature(dataset):
    '''
    Intuition: splitting always lowers the weighted average entropy, but raw gain
    favors many-valued features, so C4.5 divides it by the split information.
    :param dataset:
    :return: the index of the best feature
    '''
    num_feature = len(dataset[0]) - 1
    num_dataset = len(dataset)
    old_ent = entropy(dataset, -1)
    best_ratio = 0.0
    best_feature = -1
    for feature in range(num_feature):
        now_information = entropy(dataset, feature)  # split information of this feature
        if now_information == 0:                     # single-valued feature: skip to avoid dividing by zero
            continue
        # collect all values taken by the i-th feature
        feature_list = [example[feature] for example in dataset]
        feature_set = set(feature_list)
        new_ent = 0.0
        for value in feature_set:
            ret = split_dataset(dataset, feature, value)
            prob = len(ret) / float(num_dataset)
            new_ent += prob * entropy(ret, -1)       # each subset's entropy, weighted by its share
        now_gain = old_ent - new_ent
        now_ratio = now_gain / now_information
        print('Gain ratio of feature {}: {}'.format(feature, now_ratio))
        if now_ratio > best_ratio:                   # bug fix: compare ratios, not the raw gain
            best_ratio = now_ratio
            best_feature = feature
    return best_feature
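On the loan data this prints gain ratios of roughly 0.156, 0.322, 0.401 and 0.202 for features 0 through 3 (my own rough arithmetic: e.g. for the age feature, the split information over its 5/5/4 value counts is about 1.577, and 0.2467 / 1.577 ≈ 0.156). Feature 2 (owns house) is therefore still the winner, although on other datasets ID3 and C4.5 can disagree.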
The Complete C4.5 Classifier
import operator
from math import log

def create_data():
    # loan-application dataset: [age, has job, owns house, credit rating, class]
    dataSet = [[0, 0, 0, 0, 'no'],
               [0, 0, 0, 1, 'no'],
               [0, 1, 0, 1, 'yes'],
               [0, 1, 1, 0, 'yes'],
               [0, 0, 0, 0, 'no'],
               [1, 0, 0, 0, 'no'],
               [1, 0, 0, 1, 'no'],
               [1, 1, 1, 1, 'yes'],
               [1, 0, 1, 2, 'yes'],
               [1, 0, 1, 2, 'yes'],
               [2, 0, 1, 2, 'yes'],
               [2, 0, 1, 1, 'yes'],
               [2, 1, 0, 1, 'yes'],
               [2, 1, 0, 2, 'yes']]
    features = ['age', 'has job', 'owns house', 'credit rating']
    return dataSet, features
def entropy(dataset, label_axis):
    # entropy of the value distribution in column label_axis:
    # -1 gives H(D) over the class labels; a feature index gives that feature's split information
    num_data = len(dataset)
    class_count = {}
    for row in dataset:
        label = row[label_axis]
        class_count[label] = class_count.get(label, 0) + 1
    ent = 0.0
    for key in class_count:
        prob = class_count[key] / float(num_data)
        ent -= prob * log(prob, 2)
    return ent
def split_dataset(mydata, axis, value):
    # rows whose feature `axis` equals `value`, with that column removed
    new_data = []
    for row in mydata:
        if row[axis] == value:
            reduced = row[:axis]
            reduced.extend(row[axis+1:])
            new_data.append(reduced)
    return new_data
def choose_best_feature(dataset):
    '''
    C4.5 selection: pick the feature with the largest gain ratio.
    :param dataset:
    :return: the index of the best feature
    '''
    num_feature = len(dataset[0]) - 1
    num_dataset = len(dataset)
    old_ent = entropy(dataset, -1)
    best_ratio = 0.0
    best_feature = -1
    for feature in range(num_feature):
        now_information = entropy(dataset, feature)  # split information of this feature
        if now_information == 0:                     # single-valued feature: skip to avoid dividing by zero
            continue
        feature_list = [example[feature] for example in dataset]
        feature_set = set(feature_list)
        new_ent = 0.0
        for value in feature_set:
            ret = split_dataset(dataset, feature, value)
            prob = len(ret) / float(num_dataset)
            new_ent += prob * entropy(ret, -1)       # each subset's entropy, weighted by its share
        now_gain = old_ent - new_ent
        now_ratio = now_gain / now_information
        # print('Gain ratio of feature {}: {}'.format(feature, now_ratio))
        if now_ratio > best_ratio:                   # compare ratios, not raw gains
            best_ratio = now_ratio
            best_feature = feature
    return best_feature
def majority(class_list):
    # majority vote over the class labels (the same kind of vote used in kNN)
    class_count = {}
    for vote in class_list:
        class_count[vote] = class_count.get(vote, 0) + 1
    sorted_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)
    return sorted_count[0][0]
# the tree itself is built recursively
def create_tree(dataset, features):
    class_list = [example[-1] for example in dataset]       # collect all class labels
    if class_list.count(class_list[0]) == len(class_list):  # every sample has the same class: stop splitting
        return class_list[0]
    if len(dataset[0]) == 1:                                # all features have been used up:
        return majority(class_list)                         # fall back to the majority class
    best_feature_index = choose_best_feature(dataset)
    best_feature = features[best_feature_index]
    my_tree = {best_feature: {}}
    del features[best_feature_index]                        # note: this mutates the caller's list
    # all values this feature takes in the current dataset
    feature_values = set(example[best_feature_index] for example in dataset)
    for value in feature_values:
        sub_features = features[:]                          # copy, so sibling branches don't interfere
        my_tree[best_feature][value] = create_tree(split_dataset(dataset, best_feature_index, value), sub_features)
    return my_tree
def classify(input_tree, features, test_vec):
    '''
    :param input_tree: the trained tree
    :param features: list of feature names
    :param test_vec: the feature values of the sample to classify
    :return: the predicted class label
    '''
    first_str = list(input_tree.keys())[0]                # feature tested at this node
    second_dic = input_tree[first_str]
    feat_index = features.index(first_str)
    class_label = None
    for key in second_dic.keys():
        if test_vec[feat_index] == key:
            if type(second_dic[key]).__name__ == 'dict':  # internal node: keep descending
                class_label = classify(second_dic[key], features, test_vec)
            else:                                         # leaf node: this is the answer
                class_label = second_dic[key]
    return class_label
if __name__ == '__main__':
    my_data, feature = create_data()
    my_tree = create_tree(my_data, feature)
    feature = ['age', 'has job', 'owns house', 'credit rating']  # create_tree consumed the list, so rebuild it
    print(my_tree)
    result = classify(my_tree, feature, [1, 0, 1, 1])
    if result == 'yes':
        print('Loan approved!')
    else:
        print('Loan denied!')
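Running the program prints the learned tree and then the decision for the test sample [1, 0, 1, 1] (middle-aged, no job, owns a house, good credit):

{'owns house': {0: {'has job': {0: 'no', 1: 'yes'}}, 1: 'yes'}}
Loan approved!

Because the sample owns a house (feature index 2 equals 1), the tree answers 'yes' at the very first split.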
Storing and Loading the Decision Tree
import pickle

# storing the tree
def storeTree(inputTree, filename):
    with open(filename, 'wb') as fw:
        pickle.dump(inputTree, fw)

if __name__ == '__main__':
    myTree = {'owns house': {0: {'has job': {0: 'no', 1: 'yes'}}, 1: 'yes'}}
    storeTree(myTree, 'classifierStorage.txt')

# loading the tree
def grabTree(filename):
    with open(filename, 'rb') as fr:
        return pickle.load(fr)

if __name__ == '__main__':
    myTree = grabTree('classifierStorage.txt')
    print(myTree)
Using sklearn's Decision Tree to Predict Contact-Lens Type
There are 24 samples in total. The columns are age, prescript, astigmatic, tearRate and class: the patient's age, the prescription type, whether the patient is astigmatic, the tear production rate, and the final class label (the recommended lens type).
The sklearn.tree module provides decision-tree models for both classification and regression.
This walkthrough uses DecisionTreeClassifier to build the tree and export_graphviz to visualize it.
from sklearn import tree

if __name__ == '__main__':
    with open('lenses.txt') as fr:
        lenses = [inst.strip().split('\t') for inst in fr.readlines()]
    print(lenses)
    lenses_features = ['age', 'prescript', 'astigmatic', 'tearRate']
    clf = tree.DecisionTreeClassifier()
    lenses = clf.fit(lenses, lenses_features)  # try to fit directly on the raw strings; this fails, see below
Running this directly raises an error:
ValueError: could not convert string to float: 'young'
sklearn estimators require numeric features (and the fit call above also passes the feature names where the per-sample labels belong). Two fixes for the string problem are label encoding and one-hot encoding; the code below uses LabelEncoder, and a one-hot sketch is given at the end of the post.

from io import StringIO  # sklearn.externals.six was removed from recent sklearn; io.StringIO works the same here
from sklearn.preprocessing import LabelEncoder
from sklearn import tree
import pandas as pd
import pydotplus

if __name__ == '__main__':
    data = []
    label = []
    with open('lenses.txt') as fr:
        for line in fr:
            data.append(line.strip().split('\t')[0:-1])
            label.append(line.strip().split('\t')[-1])
    lenses = pd.DataFrame(data, columns=['age', 'prescript', 'astigmatic', 'tearRate'])
    le = LabelEncoder()                      # encodes each string value as an integer
    for col in lenses.columns:               # encode column by column
        lenses[col] = le.fit_transform(lenses[col])
    print(lenses)
    # print(label)
    clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=4)  # create the classifier
    clf = clf.fit(lenses.values, label)      # fit the tree on the encoded data
    predict = clf.predict([[1, 1, 1, 0]])
    print('Prediction: {}'.format(predict))
    dot_data = StringIO()
    tree.export_graphviz(clf, out_file=dot_data,  # export the tree in Graphviz dot format
                         feature_names=lenses.keys(),
                         class_names=clf.classes_,
                         filled=True, rounded=True,
                         special_characters=True)
    graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
    graph.write_pdf("tree.pdf")
Visualization result: the tree is rendered to tree.pdf. (pydotplus needs the Graphviz binaries installed and on the system PATH.)
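As for the second fix mentioned above, here is a minimal one-hot sketch, assuming the same lenses.txt file; pd.get_dummies plays the role of the OneHotEncoder:

import pandas as pd
from sklearn import tree

with open('lenses.txt') as fr:
    rows = [line.strip().split('\t') for line in fr]
X = pd.get_dummies(pd.DataFrame([r[:-1] for r in rows],
                                columns=['age', 'prescript', 'astigmatic', 'tearRate']))
labels = [r[-1] for r in rows]  # class labels can stay as strings
clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=4)
clf = clf.fit(X, labels)        # one dummy column per (feature, value) pair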