Probability Algorithms: Binomial and Poisson Distributions; Statistical Methods: Probability Basics

Continuing the statistics write-ups. Nothing especially advanced this time, and it doesn't go all that deep either; it is still fairly basic material:
1. Variance (sample)
2. Standard deviation (sample; the post labels it "covariance", but what the function computes is the square root of the variance)
3. Coefficient of variation
4. Correlation coefficient

The functions for this installment are listed below.

The BP (backpropagation) neural network is a fundamental building block of deep learning and one of its key algorithms, so understanding the principle behind BP networks, as well as how to implement them, matters quite a bit. Below we go over both the principle and an implementation.

1. Principle

I'll fill this section in bit by bit when I have time; for now, see a good article in English: A Step by Step
Backpropagation Example

On activation functions: Common activation functions in deep learning — Sigmoid & ReLU &
Softmax

An easy-to-follow take on initialization: CS231n course notes (translated): Neural Networks Notes
2

Useful tricks: Tricks in neural network training — efficient BP (backpropagation)

A simple walkthrough of how a BPNN computes: Understanding backpropagation in neural networks — BackPropagation
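Until the principle section gets written up, here is a minimal sketch of a single backpropagation step (my own illustration with made-up numbers, not taken from the articles above): one sigmoid neuron trained on one sample under a squared loss.

import numpy as np

x = np.array([0.5, -1.0])  # input sample (made-up)
t = 1.0                    # target
w = np.array([0.2, 0.4])   # weights
b = 0.1                    # bias
lr = 0.5                   # learning rate

# forward pass
a = w.dot(x) + b              # pre-activation
z = 1.0 / (1.0 + np.exp(-a))  # sigmoid output

# backward pass for the loss L = 0.5 * (z - t)^2
delta = (z - t) * z * (1.0 - z)  # dL/da via the chain rule
w = w - lr * delta * x           # dL/dw = delta * x
b = b - lr * delta               # dL/db = delta

print(z, w, b)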

2. Implementation — batch stochastic gradient method

This implements a BP neural network with a configurable number of layers; the network structure is defined through the net_struct parameter. For example, a structure with only an output layer and no hidden layer, a "sigmoid" activation function, and a 0.01 learning rate is defined as:

net_struct = [[10, "sigmoid", 0.01]]  # network structure

A structure with a 100-neuron hidden layer, followed by a 50-neuron hidden layer and a 10-neuron output layer, is defined like this:

net_struct = [[100, "sigmoid", 0.01], [50, "sigmoid", 0.01], [10, "sigmoid", 0.01]]  # network structure

The implementation — every coder's favorite part — is as follows:

# encoding=utf8
'''
Created on 2017-7-3

@author: Administrator
'''
import random

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split as ttsplit


class LossFun:
    def __init__(self, lf_type="least_square"):
        self.name = "loss function"
        self.type = lf_type

    def cal(self, t, z):
        loss = 0
        if self.type == "least_square":
            loss = self.least_square(t, z)
        return loss

    def cal_deriv(self, t, z):
        delta = 0
        if self.type == "least_square":
            delta = self.least_square_deriv(t, z)
        return delta

    def least_square(self, t, z):
        zsize = z.shape
        sample_num = zsize[1]
        # mean squared error over the batch (a stray "* t" in the original looked like a typo and is dropped)
        return np.sum(0.5 * (t - z) * (t - z)) / sample_num

    def least_square_deriv(self, t, z):
        return z - t


class ActivationFun:
    '''
    Activation functions
    '''
    def __init__(self, atype="sigmoid"):
        self.name = "activation function library"
        self.type = atype

    def cal(self, a):
        z = 0
        if self.type == "sigmoid":
            z = self.sigmoid(a)
        elif self.type == "relu":
            z = self.relu(a)
        return z

    def cal_deriv(self, a):
        z = 0
        if self.type == "sigmoid":
            z = self.sigmoid_deriv(a)
        elif self.type == "relu":
            z = self.relu_deriv(a)
        return z

    def sigmoid(self, a):
        return 1 / (1 + np.exp(-a))

    def sigmoid_deriv(self, a):
        fa = self.sigmoid(a)
        return fa * (1 - fa)

    def relu(self, a):
        # leaky variant: small slope on the negative side; copy so the stored
        # pre-activation is not modified in place (the original mutated it)
        a = a.copy()
        idx = a <= 0
        a[idx] = 0.1 * a[idx]
        return a  # np.maximum(a, 0.0)

    def relu_deriv(self, a):
        a = a.copy()
        a[a > 0] = 1.0
        a[a <= 0] = 0.1
        return a


class Layer:
    '''
    One layer of the neural network
    '''
    def __init__(self, num_neural, af_type="sigmoid", learn_rate=0.5):
        self.af_type = af_type  # activation function type
        self.learn_rate = learn_rate
        self.num_neural = num_neural
        self.dim = None
        self.W = None
        self.a = None
        self.X = None
        self.z = None
        self.delta = None
        self.theta = None
        self.act_fun = ActivationFun(self.af_type)

    def fp(self, X):
        '''
        Forward propagation
        '''
        self.X = X
        xsize = X.shape
        self.dim = xsize[0]
        self.num = xsize[1]

        if self.W is None:
            # self.W = np.random.random((self.dim, self.num_neural)) - 0.5
            # self.W = np.random.uniform(-1, 1, size=(self.dim, self.num_neural))
            if self.af_type == "sigmoid":
                self.W = np.random.normal(0, 1, size=(self.dim, self.num_neural)) / np.sqrt(self.num)
            elif self.af_type == "relu":
                self.W = np.random.normal(0, 1, size=(self.dim, self.num_neural)) * np.sqrt(2.0 / self.num)
        if self.theta is None:
            # self.theta = np.random.random((self.num_neural, 1)) - 0.5
            # self.theta = np.random.uniform(-1, 1, size=(self.num_neural, 1))
            if self.af_type == "sigmoid":
                self.theta = np.random.normal(0, 1, size=(self.num_neural, 1)) / np.sqrt(self.num)
            elif self.af_type == "relu":
                self.theta = np.random.normal(0, 1, size=(self.num_neural, 1)) * np.sqrt(2.0 / self.num)

        # calculate the forward pre-activation a
        self.a = (self.W.T).dot(self.X)
        # calculate the forward output z
        self.z = self.act_fun.cal(self.a)
        return self.z

    def bp(self, delta):
        '''
        Back propagation
        '''
        self.delta = delta * self.act_fun.cal_deriv(self.a)
        # average the theta update over all samples in the batch
        self.theta = np.array([np.mean(self.theta - self.learn_rate * self.delta, 1)]).T
        dW = self.X.dot(self.delta.T) / self.num
        self.W = self.W - self.learn_rate * dW
        delta_out = self.W.dot(self.delta)
        return delta_out


class BpNet:
    '''
    BP neural network
    '''
    def __init__(self, net_struct, stop_crit, max_iter, batch_size=10):
        self.name = "net work"
        self.net_struct = net_struct
        if len(self.net_struct) == 0:
            print("no layer is specified!")
            return
        self.stop_crit = stop_crit
        self.max_iter = max_iter
        self.batch_size = batch_size
        self.layers = []
        self.num_layers = 0
        # build the network
        self.create_net(net_struct)
        self.loss_fun = LossFun("least_square")

    def create_net(self, net_struct):
        '''
        Build the network
        '''
        self.num_layers = len(net_struct)
        for i in range(self.num_layers):
            self.layers.append(Layer(net_struct[i][0], net_struct[i][1], net_struct[i][2]))

    def train(self, X, t, Xtest=None, ttest=None):
        '''
        Train the network
        '''
        eva_acc_list = []
        eva_loss_list = []
        xshape = X.shape
        num = xshape[0]
        dim = xshape[1]
        for k in range(self.max_iter):
            # draw a random mini-batch
            idxs = random.sample(range(num), self.batch_size)
            xi = np.array([X[idxs, :]]).T[:, :, 0]
            ti = np.array([t[idxs, :]]).T[:, :, 0]
            # forward pass
            zi = self.fp(xi)
            # error at the output
            delta_i = self.loss_fun.cal_deriv(ti, zi)
            # backward pass
            self.bp(delta_i)
            # evaluate accuracy
            if Xtest is not None:
                if k % 100 == 0:
                    [eva_acc, eva_loss] = self.test(Xtest, ttest)
                    eva_acc_list.append(eva_acc)
                    eva_loss_list.append(eva_loss)
                    print("%4d,%4f,%4f" % (k, eva_acc, eva_loss))
            else:
                print("%4d" % (k))
        return [eva_acc_list, eva_loss_list]

    def test(self, X, t):
        '''
        Measure model accuracy
        '''
        xshape = X.shape
        num = xshape[0]
        z = self.fp_eval(X.T)
        t = t.T
        est_pos = np.argmax(z, 0)
        real_pos = np.argmax(t, 0)
        correct_count = np.sum(est_pos == real_pos)
        acc = 1.0 * correct_count / num
        loss = self.loss_fun.cal(t, z)
        return [acc, loss]

    def fp(self, X):
        '''
        Forward pass
        '''
        z = X
        for i in range(self.num_layers):
            z = self.layers[i].fp(z)
        return z

    def bp(self, delta):
        '''
        Backward pass
        '''
        z = delta
        for i in range(self.num_layers - 1, -1, -1):
            z = self.layers[i].bp(z)
        return z

    def fp_eval(self, X):
        '''
        Forward pass for evaluation
        '''
        layers = self.layers
        z = X
        for i in range(self.num_layers):
            z = layers[i].fp(z)
        return z


def z_score_normalization(x):
    mu = np.mean(x)
    sigma = np.std(x)
    x = (x - mu) / sigma
    return x


def sigmoid(X, useStatus):
    if useStatus:
        return 1.0 / (1 + np.exp(-float(X)))
    else:
        return float(X)


def plot_curve(data, title, lege, xlabel, ylabel):
    num = len(data)
    idx = range(num)
    plt.plot(idx, data, color="r", linewidth=1)
    plt.xlabel(xlabel, fontsize="xx-large")
    plt.ylabel(ylabel, fontsize="xx-large")
    plt.title(title, fontsize="xx-large")
    plt.legend([lege], fontsize="xx-large", loc='upper left')
    plt.show()


if __name__ == "__main__":
    print('This is main of module "bp_nn.py"')
    print("Import data")
    raw_data = pd.read_csv('./train.csv', header=0)
    data = raw_data.values
    imgs = data[0::, 1::]
    labels = data[::, 0]
    train_features, test_features, train_labels, test_labels = ttsplit(
        imgs, labels, test_size=0.33, random_state=23323)
    train_features = z_score_normalization(train_features)
    test_features = z_score_normalization(test_features)

    # one-hot encode the labels
    sample_num = train_labels.shape[0]
    tr_labels = np.zeros([sample_num, 10])
    for i in range(sample_num):
        tr_labels[i][train_labels[i]] = 1
    sample_num = test_labels.shape[0]
    te_labels = np.zeros([sample_num, 10])
    for i in range(sample_num):
        te_labels[i][test_labels[i]] = 1

    print(train_features.shape)
    print(tr_labels.shape)
    print(test_features.shape)
    print(te_labels.shape)

    stop_crit = 100  # stopping criterion
    max_iter = 10000  # maximum number of iterations
    batch_size = 100  # number of samples per training step
    # network structure: [[num_neurons, activation function, learning rate], ...]
    net_struct = [[100, "relu", 0.01], [10, "sigmoid", 0.1]]
    # net_struct = [[200, "sigmoid", 0.5], [100, "sigmoid", 0.5], [10, "sigmoid", 0.5]]

    bpNNCls = BpNet(net_struct, stop_crit, max_iter, batch_size)
    # train model
    [acc, loss] = bpNNCls.train(train_features, tr_labels, test_features, te_labels)
    # [acc, loss] = bpNNCls.train(train_features, tr_labels)
    print("training model finished")

    plot_curve(acc, "Bp Network Accuracy", "accuracy", "iter", "Accuracy")
    plot_curve(loss, "Bp Network Loss", "loss", "iter", "Loss")

    # test model
    [acc, loss] = bpNNCls.test(test_features, te_labels)
    print("test accuracy:%f" % (acc))

The experiments use the MNIST dataset, which can be downloaded from: https://github.com/WenDesi/lihang_book_algorithm/blob/master/data/train.csv

a. Using the sigmoid activation and a net_struct = [[10, "sigmoid"]] structure (no hidden layer, so it can be viewed as a softmax-style regression), the validation accuracy and loss evolve as shown below:

[Figure: validation accuracy curve]

[Figure: validation loss curve]

Test accuracy reaches 0.916017, which is pretty decent. However, stochastic gradient descent depends on the parameter initialization; if the initialization is poor, convergence is slow and the result can even be unsatisfactory.

b. Using the sigmoid activation and a net_struct = [[200, "sigmoid"], [100, "sigmoid"], [10, "sigmoid"]] structure (a 200-unit hidden layer, a 100-unit hidden layer, and a 10-unit output layer), the validation accuracy and loss evolve as shown below:

[Figure: validation accuracy curve]

[Figure: validation loss curve]

Validation accuracy reaches 0.963636, a good deal better than the softmax-style model. The loss curves also show that, once hidden layers are added, the algorithm converges faster than without them.


As usual, start by generating a random list. This time that step is wrapped in a function so it can be called again later; the functions written in the previous post will also keep being used here.
import random

def create_rand_list(min_num,max_num,count_list):
  case_list = []
  while len(case_list) < count_list:
    rand_float = random.uniform(min_num,max_num)
    if rand_float in case_list:
      continue
    case_list.append(rand_float)
  case_list = [round(case,2) for case in case_list]
  return case_list
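For example (not in the original post), ten values drawn between 5 and 26:

rand_list = create_rand_list(5, 26, 10)
print(rand_list)  # e.g. [15.79, 6.83, 12.83, ...]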


Below are the history functions:
sum_fun() # sum
len_fun() # count of elements
multiply_fun() # product
sum_mean_fun() # arithmetic mean
sum_mean_rate() # rate of return via the arithmetic mean
median_fun() # median
modes_fun() # mode
ext_minus_fun() # range (max minus min)
geom_mean_fun() # geometric mean
geom_mean_rate() # rate of return via the geometric mean
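Those helpers come from earlier posts and are not reproduced here. So that the snippets below run on their own, here are minimal stand-ins for the four that the later code actually calls (my guesses at the originals, assuming the obvious behavior; not the author's code):

import functools

def sum_fun(nums):
  # stand-in: sum of a list
  return sum(nums)

def len_fun(nums):
  # stand-in: number of elements
  return len(nums)

def multiply_fun(nums):
  # stand-in: product of all elements
  return functools.reduce(lambda a, b: a * b, nums, 1)

def sum_mean_fun(nums):
  # stand-in: arithmetic mean
  return sum_fun(nums) / len_fun(nums)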


The new function code:


import random

# First generate a random list; the function already exists, so it isn't repeated here
rand_list = [15.79, 6.83, 12.83, 22.32, 17.92, 6.29, 10.19, 10.13, 24.23, 25.56]

# 1. Variance (sample, S^2): subtract the list's mean from each element, square, and sum;
#    divide by count - 1 (the population variance divides by the full count instead)
def var_fun(rand_list):
  mean_num = sum_mean_fun(rand_list) # arithmetic mean
  len_num = len_fun(rand_list) # element count
  var_list = [(x-mean_num)**2 for x in rand_list]
  var_sum = sum_fun(var_list)
  var_num = var_sum/(len_num - 1)
  return var_num

# 2. Standard deviation (sample, S; the post says "covariance"): simple, just the square root of the variance
def covar_fun(rand_list):
  var_num = var_fun(rand_list)
  covar_num = var_num ** 0.5
  return covar_num

# 3. Coefficient of variation (CV), a measure of dispersion: standard deviation / arithmetic mean * 100%
# Note (from Baidu Baike): in statistical analysis, a CV above 15% suggests the data may be
# abnormal and should perhaps be excluded
def trans_coef_fun(rand_list):
  covar_num = covar_fun(rand_list)
  mean_num = sum_mean_fun(rand_list)
  trans_coef_num = covar_num / mean_num
  return trans_coef_num

# 4. Correlation coefficient (sample, r): the linear relationship between two dimensions,
#    -1 < r < 1; the closer to 1, the stronger the relationship between the dimensions.
#    Two dimensions means two input lists, so the algorithm is a bit more involved:
'''
((x1-mean(x))(y1-mean(y))+(x2-mean(x))(y2-mean(y))+...(xn-mean(x))(yn-mean(y)))
/((x1-mean(x))^2+(x2-mean(x))^2+...(xn-mean(x))^2)^0.5*((y1-mean(y))^2+(y2-mean(y))^2+...(yn-mean(y))^2)^0.5
'''
x_list = rand_list
y_list = [4.39, 13.84, 9.21, 9.91, 15.69, 14.92, 25.77, 23.99, 8.15, 25.07]
def pearson_fun(x_list,y_list):
  x_mean = sum_mean_fun(x_list)
  y_mean = sum_mean_fun(y_list)
  len_num = len_fun(x_list)
  if len_num == len_fun(y_list):
    xy_multiply_list = [(x_list[i]-x_mean)*(y_list[i]-y_mean) for i in range(len_num)]
    xy_multiply_num = sum_fun(xy_multiply_list)
  else:
    print('input list wrong, another input try')
    return None
  x_covar_son_list = [(x-x_mean)**2 for x in x_list]
  y_covar_son_list = [(y-y_mean)**2 for y in y_list]
  x_covar_son_num = sum_fun(x_covar_son_list)
  y_covar_son_num = sum_fun(y_covar_son_list)
  xy_covar_son_multiply_num = (x_covar_son_num ** 0.5) * (y_covar_son_num ** 0.5)
  pearson_num = xy_multiply_num / xy_covar_son_multiply_num
  return pearson_num
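A quick run of the four new functions on the lists above (not in the original post):

print(round(var_fun(rand_list), 2))          # sample variance
print(round(covar_fun(rand_list), 2))        # sample standard deviation
print(round(trans_coef_fun(rand_list), 4))   # coefficient of variation
print(round(pearson_fun(x_list, y_list), 4)) # correlation of the two lists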

Below are the history functions:

create_rand_list() # build a list containing a given number of random elements
sum_fun() # sum
len_fun() # count of elements
multiply_fun() # product
sum_mean_fun() # arithmetic mean
sum_mean_rate() # rate of return via the arithmetic mean
median_fun() # median
modes_fun() # mode
ext_minus_fun() # range (max minus min)
geom_mean_fun() # geometric mean
geom_mean_rate() # rate of return via the geometric mean
var_fun() # variance (sample S^2)
covar_fun() # standard deviation (sample S)
trans_coef_fun() # coefficient of variation CV
pearson_fun() # correlation coefficient (sample r)
unite_rate_fun() # joint probability
condition_rate_fun() # conditional probability
e_x() # expected value of a random variable
var_rand_fun() # variance of a random variable
covar_rand_fun() # standard deviation of a random variable
covar_rand_xy_fun() # joint covariance
e_p() # expected portfolio return
var_p_fun() # portfolio risk
bayes() # Bayes

—————Above is the old————————————————————————
—————Below is the new————————————————————————

Continuing with probability: this time it's the binomial and Poisson distributions. These two are actually pretty fun, since they can be used as prediction functions. Because there are only a few functions this time, there's no worked example, but every function gets explained one by one.

 

1. Factorial, n!
Just keep multiplying while stepping down by 1 until you reach 1, e.g. 5! = 5 * 4 * 3 * 2 * 1 = 120. That is the conventional direction, but a function written that way is a little less efficient, so we simply flip it around to 1*2*3..., and the function becomes:

def fact_fun(n):
  if n == 0:
    return 1
  fact_list = [i for i in range(1, n + 1)]
  fact_num = multiply_fun(fact_list)
  return fact_num

 

2. Counting combinations, C
C(n, x) = n! / (x! * (n - x)!)
This is the number of ways to draw x sample units from n samples, i.e. the number of combinations that can result; for example, drawing 3 items out of 5 can produce 10 different combinations.

def c_n_x(case_count,real_count):
  fact_n = fact_fun(case_count)
  fact_x = fact_fun(real_count)
  fact_n_x = fact_fun(case_count - real_count)
  c_n_x_num = fact_n // (fact_x * fact_n_x)  # the factorials divide exactly, so integer division is safe
  return c_n_x_num
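A quick check of both (not in the original post):

print(fact_fun(5))  # 120
print(c_n_x(5, 3))  # 10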


3. Binomial probability distribution
Run n Bernoulli trials; a Bernoulli trial is a single experiment with exactly two possible, mutually exclusive outcomes, like flipping a coin. The binomial distribution gives the probability of succeeding k times out of the n runs:
P(X=k) = C(n,k) * p^k * (1-p)^(n-k)
For example, with n=5 and k=3, P = C(5,3) * p^3 * (1-p)^2.
Here p is the success probability of a single event, and failure is 1 - p.

def binomial_fun(case_count,real_count,p):
  c_n_k_num = c_n_x(case_count,real_count)
  pi = (p ** real_count) * ((1 - p) ** (case_count - real_count))
  binomial_num = c_n_k_num * pi
  return binomial_num
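For a quick check (not in the original post): the probability of exactly three heads in five fair coin flips.

print(binomial_fun(5, 3, 0.5))  # 10 * 0.5**5 = 0.3125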

4. Poisson distribution
Given an opportunity field, which can be a region of space or a span of time, the Poisson distribution gives the probability that some counting event occurs a given number of times within that field. For example: a shop averages 10 customers per hour; the probability that exactly 13 customers show up in one hour is Poisson-distributed, with "13 customers show up" as the counting event:
P(X) = (e^(-λ) * λ^X) / X! = (2.7182818^(-10) * 10^13) / 13! ≈ 0.0729
Here λ is the mean, which can be obtained with the arithmetic mean, and e is the natural constant, ≈ 2.7182818. The function:

def poisson_fun(chance_x, case_list=[0], mean_num=0):
  chance_x_fact = fact_fun(chance_x)
  e = 2.7182818
  if len_fun(case_list) == 1 and case_list[0] == 0:
    poisson_num = ((e ** (0 - mean_num)) * mean_num ** chance_x) / chance_x_fact
  else:
    mean_num = sum_mean_fun(case_list)
    poisson_num = ((e ** (0 - mean_num)) * mean_num ** chance_x) / chance_x_fact
  return poisson_num

A note on this function: it really needs just two inputs, the mean and the count we care about. A third parameter is declared because the mean may arrive in either of two forms: a single number (mean_num), or a list (case_list) from which the mean is computed; both paths appear in the if branch above. Accordingly there are two ways to call it:

if __name__ == '__main__':
  # first style: pass the mean directly
  poisson_rate = poisson_fun(mean_num=10, chance_x=13)
  print(poisson_rate)
  # second style: pass a list and let the function compute the mean
  case_list = [8, 9, 10, 11, 12]
  poisson_rate = poisson_fun(case_list=case_list, chance_x=13)
  print(poisson_rate)
Before getting into probability, review the history functions — the same set listed above, from create_rand_list() through pearson_fun(), so they are not repeated here.

—————Above is the old————————————————————————
—————Below is the new————————————————————————

The probability material left me thoroughly confused, so all the code below follows my own understanding of it; if anything is wrong, corrections are welcome.
One more note: probability is delicate work, so there will be plenty of floating-point numbers with very precise decimals; except in special cases, I round to four decimal places.
A simple event is an event with only one feature, and the set of all possible events is the sample space. An example:
there are two bags of peanuts; bag A holds 32 peanuts of which 3 are bad, and bag B holds 17 of which 5 are bad. The sample space for this example is the table below. I'll just say: if I picked bag B, I would definitely curse the peanut seller to eat instant noodles without any seasoning.

bag | bad? | count
A   | 0    | 3
A   | 1    | 29
B   | 0    | 5
B   | 1    | 12

For convenience, True is written as 0 and False as 1.

1. Simple marginal probability, written P(A)
This one is easy to understand, e.g. the occurrence rate of bad peanuts. It's simple, so it gets no standalone code (a quick sketch follows anyway):
P(A) = bad peanuts / total = 8/49 = 0.1633
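Although it needs no standalone function, here is a minimal check (not in the original post), using the table above:

bad_count = 3 + 5              # bad peanuts across bags A and B
total_count = 3 + 29 + 5 + 12  # all peanuts
print(round(float(bad_count) / total_count, 4))  # 0.1633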


2. Joint probability

Since events are being joined, two of them are needed, written P(A∩B); the ∩ symbol simply means "and".
It is the probability of events A and B taken together as one event: eating a bad peanut out of bag A is a joint probability, where event A is "bad peanut" and event B is "bag A".
There is some disagreement here. The more commonly used version is
P(A∩B) = 3/49 = 0.0612
The other is
P(A∩B) = 3/32 * 0.5 ≈ 0.0469
I personally lean toward the first, although it is strongly affected by the other events: consider that if bag B held 10,000 peanuts with the number of bad ones unchanged, the result would change a great deal.
So the function is:

def unite_rate_fun(condition_count,all_count):
  p_a_with_b = float(condition_count) / all_count
  return p_a_with_b
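A quick call for the bad-peanut-from-bag-A example (not in the original post):

print(round(unite_rate_fun(3, 49), 4))  # 0.0612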


3. Conditional probability
The probability that one event occurs given that another event has already occurred; the more formal phrasing is: given event B, the probability that event A occurs. It can of course also be turned around.
P(A|B) = P(A∩B)/P(B)
and in reverse
P(B|A) = P(A∩B)/P(A)
Staying with the example: it is now known that event B is "drawing from bag A", so P(B) = 32/49, and
P(A|B) = (3/49)/(32/49) = 3/32 ≈ 0.0937
The function is:

def condition_rate_fun(p_a_with_b,p_b):
  p_a_from_b = p_a_with_b / p_b
  return p_a_from_b
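A quick call for the same example (not in the original post):

p_a_with_b = unite_rate_fun(3, 49)  # P(A∩B)
p_b = unite_rate_fun(32, 49)        # P(B)
print(condition_rate_fun(p_a_with_b, p_b))  # 0.09375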

 

The content below doesn't really suit the peanut example, so let's switch to a school story:
the share of each score on one class's English exam.

score | share
20    | 0.1
40    | 0.1
60    | 0.3
80    | 0.4
100   | 0.1

4. Expected value of a random variable
Much like the arithmetic mean; actual outcomes should not deviate from this number too much.
μ = E(X) = Σ Xi*P(Xi), summed over all N outcomes
E(X) = 20 * 0.1 + 40 * 0.1 + 60 * 0.3 + 80 * 0.4 + 100 * 0.1 = 66

def e_x(count_list,rate_list):
  e_len = len_fun(count_list)
  if e_len == len_fun(rate_list):
    e_list = [count_list[i] * rate_list[i] for i in range(e_len)]
    e_num = sum_fun(e_list)
  else: return None
  return e_num
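A quick call on the exam-score table (not in the original post):

score_list = [20, 40, 60, 80, 100]
rate_list = [0.1, 0.1, 0.3, 0.4, 0.1]
print(e_x(score_list, rate_list))  # 66.0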

5. Variance of a random variable
Serves the same purpose as the sample variance, so not much more to say.
σ^2 = Σ [Xi - E(X)]^2 * P(Xi), summed over all N outcomes

def var_rand_fun(count_list,rate_list):
  e_num = e_x(count_list,rate_list)
  var_len = len_fun(count_list)
  if var_len == len_fun(rate_list):
    var_list = [((count_list[i] - e_num) ** 2) * rate_list[i] for i in range(var_len)]
    var_num = sum_fun(var_list)
  else: return None
  return var_num
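Continuing the exam example (not in the original post; score_list and rate_list as above):

print(var_rand_fun(score_list, rate_list))  # 484.0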

6. "Covariance" of a random variable (again, really the standard deviation)
The function is simple: just take the square root of the variance function above.

def covar_rand_fun(count_list,rate_list):
  var_rand_num = var_rand_fun(count_list,rate_list)
  covar_num = var_rand_num ** 0.5
  return covar_num
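And for the same example (not in the original post):

print(covar_rand_fun(score_list, rate_list))  # 22.0, the square root of 484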

7. Joint covariance
σxy = Σ [Xi - E(X)] * [Yi - E(Y)] * P(XiYi), summed over all N outcomes

def covar_rand_xy_fun(x_count_list,y_count_list,xy_rate_list):
  e_x_num = e_x(x_count_list,xy_rate_list)
  e_y_num = e_x(y_count_list,xy_rate_list)
  covar_len = len_fun(x_count_list)
  if covar_len == len_fun(y_count_list) and covar_len == len_fun(xy_rate_list):
    covar_rand_xy_list = [(x_count_list[i] - e_x_num) * (y_count_list[i] - e_y_num) * xy_rate_list[i] for i in range(covar_len)]
    covar_rand_xy_num = sum_fun(covar_rand_xy_list)
  else: return None
  return covar_rand_xy_num
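A made-up two-asset example (not in the original post; the numbers are hypothetical):

x_count_list = [5, 10, 15]      # outcomes for asset X
y_count_list = [12, 9, 6]       # outcomes for asset Y
xy_rate_list = [0.3, 0.4, 0.3]  # joint probabilities
print(covar_rand_xy_fun(x_count_list, y_count_list, xy_rate_list))  # -9.0: the two move in opposite directions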

8. Expected portfolio return
The largest return obtainable for the smallest risk.
E(P) = w*E(X) + (1 - w)*E(Y)
where w is the proportion of the funds invested in asset X.

def e_p(x_count_list,y_count_list,xy_rate_list):
  e_x_num = e_x(x_count_list,xy_rate_list)
  e_y_num = e_x(y_count_list,xy_rate_list)
  w = sum_fun(x_count_list) / (sum_fun(x_count_list) + sum_fun(y_count_list))
  e_p_num = w * e_x_num + (1 - w) * e_y_num
  return e_p_num
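Reusing the hypothetical assets above (not in the original post):

print(e_p(x_count_list, y_count_list, xy_rate_list))  # ≈ 9.5263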

九、投资组合危害
其一从未搞懂是做什么的,应该是愿意回报的不是值吗
σ(p) = [w^2σ(x)^2 + (1 – w)^2σ(y)^2 + 2w(1 – w)σ(xy)]^0.5

def var_p_fun(x_count_list,y_count_list,xy_rate_list):
  w = sum_fun(x_count_list) / (sum_fun(x_count_list) + sum_fun(y_count_list))
  var_rand_x_num = var_rand_fun(x_count_list,xy_rate_list)
  var_rand_y_num = var_rand_fun(y_count_list,xy_rate_list)
  covar_rand_xy_num = covar_rand_xy_fun(x_count_list,y_count_list,xy_rate_list)
  # the first term uses the variance of x (the original mistakenly used var_rand_y_num twice)
  var_p_num = (w * w * var_rand_x_num + (1 - w) * (1 - w) * var_rand_y_num + 2 * w * (1 - w) * covar_rand_xy_num) ** 0.5
  return var_p_num
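And for the same hypothetical assets (not in the original post):

print(var_p_fun(x_count_list, y_count_list, xy_rate_list))  # ≈ 0.9377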

other: Bayes
This is honestly the one that confused me the most; I suspect my version isn't accurate, so treat it as a reference only.

def bayes(true_coef,event_rate,event_bool,manage_num):
  'True = 0, False = 1'
  manage_num = manage_num - 1
  false_coef = 1 - true_coef
  event_count = len_fun(event_rate)
  if event_bool[manage_num] == 0:
    main_rate = event_rate[manage_num] * true_coef
  else:
    main_rate = event_rate[manage_num] * false_coef
  event_true_list = [event_rate[n] * true_coef for n in range(event_count) if event_bool[n] == 0]
  # the original multiplied by true_coef here as well, which looks like a typo
  event_false_list = [event_rate[n] * false_coef for n in range(event_count) if event_bool[n] == 1]
  event_sum = sum_fun(event_true_list) + sum_fun(event_false_list)
  event_succe_rate = main_rate/event_sum
  return event_succe_rate
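A made-up call (not in the original post), reading event_rate as the per-branch evidence rates and event_bool as marking which entries pair with true_coef (0) versus false_coef (1); manage_num picks the 1-based entry of interest:

true_coef = 0.6          # prior weight of the "true" branch
event_rate = [0.8, 0.3]  # evidence rate under each branch
event_bool = [0, 1]      # first entry pairs with true_coef, second with false_coef
print(bayes(true_coef, event_rate, event_bool, 1))  # 0.48 / (0.48 + 0.12) = 0.8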

 
