gamtolint.py
import math
import warnings

import numpy as np
import pandas as pd
import scipy.stats
import scipy.optimize as opt
import scipy.integrate as integrate

warnings.filterwarnings('ignore')
def Kfactor(n, f = None, alpha = 0.05, P = 0.99, side = 1, method = 'HE', m=50):
K=None
    if f is None:
f = n-1
if (len((n,)*1)) != len((f,)*1) and (len((f,)*1) > 1):
return 'Length of \'f\' needs to match length of \'n\'!'
if (side != 1) and (side != 2):
        return 'Must specify a one-sided or two-sided procedure'
if side ==1:
zp = scipy.stats.norm.ppf(P)
ncp = np.sqrt(n)*zp
ta = scipy.stats.nct.ppf(1-alpha,df = f, nc=ncp) #students t noncentralized
K = ta/np.sqrt(n)
else:
def Ktemp(n, f, alpha, P, method, m):
chia = scipy.stats.chi2.ppf(alpha, df = f)
k2 = np.sqrt(f*scipy.stats.ncx2.ppf(P,df=1,nc=(1/n))/chia) #noncentralized chi 2 (ncx2))
if method == 'HE':
def TEMP4(n, f, P, alpha):
chia = scipy.stats.chi2.ppf(alpha, df = f)
zp = scipy.stats.norm.ppf((1+P)/2)
za = scipy.stats.norm.ppf((2-alpha)/2)
dfcut = n**2*(1+(1/za**2))
V = 1 + (za**2)/n + ((3-zp**2)*za**4)/(6*n**2)
K1 = (zp * np.sqrt(V * (1 + (n * V/(2 * f)) * (1 + 1/za**2))))
G = (f-2-chia)/(2*(n+1)**2)
K2 = (zp * np.sqrt(((f * (1 + 1/n))/(chia)) * (1 + G)))
if f > dfcut:
K = K1
else:
K = K2
                    if K is None or np.isnan(K):
K = 0
return K
#TEMP5 = np.vectorize(TEMP4())
K = TEMP4(n, f, P, alpha)
return K
elif method == 'HE2':
zp = scipy.stats.norm.ppf((1+P)/2)
K = zp * np.sqrt((1+1/n)*f/chia)
return K
elif method == 'WBE':
r = 0.5
delta = 1
while abs(delta) > 0.00000001:
Pnew = scipy.stats.norm.cdf(1/np.sqrt(n)+r) - scipy.stats.norm.cdf(1/np.sqrt(n)-r)
delta = Pnew-P
diff = scipy.stats.norm.pdf(1/np.sqrt(n)+r) + scipy.stats.norm.pdf(1/np.sqrt(n)-r)
r = r-delta/diff
K = r*np.sqrt(f/chia)
return K
elif method == 'ELL':
if f < n**2:
print("Warning Message:\nThe ellison method should only be used for f appreciably larger than n^2")
r = 0.5
delta = 1
zp = scipy.stats.norm.ppf((1+P)/2)
while abs(delta) > 0.00000001:
Pnew = scipy.stats.norm.cdf(zp/np.sqrt(n)+r) - scipy.stats.norm.cdf(zp/np.sqrt(n)-r)
delta = Pnew - P
diff = scipy.stats.norm.pdf(zp/np.sqrt(n)+r) + scipy.stats.norm.pdf(zp/np.sqrt(n)-r)
r = r-delta/diff
K = r*np.sqrt(f/chia)
return K
elif method == 'KM':
K = k2
return K
elif method == 'OCT':
delta = np.sqrt(n)*scipy.stats.norm.ppf((1+P)/2)
def Fun1(z,P,ke,n,f1,delta):
return (2 * scipy.stats.norm.cdf(-delta + (ke * np.sqrt(n * z))/(np.sqrt(f1))) - 1) * scipy.stats.chi2.pdf(z,f1)
def Fun2(ke, P, n, f1, alpha, m, delta):
if n < 75:
return integrate.quad(Fun1,a = f1 * delta**2/(ke**2 * n), b = np.inf, args=(P,ke,n,f1,delta),limit = m)
else:
return integrate.quad(Fun1,a = f1 * delta**2/(ke**2 * n), b = n*1000, args=(P,ke,n,f1,delta),limit = m)
def Fun3(ke,P,n,f1,alpha,m,delta):
f = Fun2(ke = ke, P = P, n = n, f1 = f1, alpha = alpha, m = m, delta = delta)
return abs(f[0] - (1-alpha))
                K = opt.minimize(fun=Fun3, x0=k2, args=(P, n, f, alpha, m, delta), method='L-BFGS-B')['x']
                return float(K[0])
elif method == 'EXACT':
def fun1(z,df1,P,X,n):
k = (scipy.stats.chi2.sf(df1*scipy.stats.ncx2.ppf(P,1,z**2)/X**2,df=df1)*np.exp(-0.5*n*z**2))
return k
def fun2(X,df1,P,n,alpha,m):
return integrate.quad(fun1,a =0, b = 5, args=(df1,P,X,n),limit=m)
def fun3(X,df1,P,n,alpha,m):
return np.sqrt(2*n/np.pi)*fun2(X,df1,P,n,alpha,m)[0]-(1-alpha)
K = opt.brentq(f=fun3,a=0,b=k2+(1000)/n, args=(f,P,n,alpha,m))
return K
K = Ktemp(n=n,f=f,alpha=alpha,P=P,method=method,m=m)
return K
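# A minimal sketch of calling Kfactor directly (the sample size, confidence
# level, and coverage below are illustrative values, not taken from the
# original module):
#
#     k1 = Kfactor(n=50, alpha=0.05, P=0.99, side=1, method='HE')  # 1-sided k-factor (exact)
#     k2 = Kfactor(n=50, alpha=0.05, P=0.99, side=2, method='HE')  # 2-sided, Howe approximation
#
# gamtolint below forms its bounds as (xbar +/- K*s)**3 on cube-root
# transformed data using these factors.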
def length(x):
    # scalars count as length 1, mirroring R's length()
    if isinstance(x, (int, float, np.float64)):
        return 1
    else:
        return len(x)
def mean(x, n):
    # plain sum(x)/n, kept so the arithmetic matches the R implementation
    return sum(x)/n
def gamtolint(x,alpha=0.05,P=0.99,side=1,method = 'HE',m=50,loggamma=False):
'''
Description:
Provides 1-sided or 2-sided tolerance intervals for data distributed
according to either a gamma distribution or log-gamma distribution
        gamtolint(x, alpha = 0.05, P = 0.99, side = 1,
                  method = ["HE", "HE2", "WBE", "ELL", "KM", "EXACT", "OCT"],
                  m = 50, loggamma = False)
Parameters
----------
x : list
A vector of data which is distributed according to either a gamma
distribution or a log-gamma distribution.
alpha : float, optional
The level chosen such that 1-alpha is the confidence level.
The default is 0.05.
P : float, optional
The proportion of the population to be covered by this tolerance
interval. The default is 0.99.
side : 1 or 2, optional
Whether a 1-sided or 2-sided tolerance interval is required
(determined by side = 1 or side = 2, respectively). The default is 1.
method : string, optional
The method for calculating the k-factors. The k-factor for the 1-sided
tolerance intervals is performed exactly and thus is the same for the
chosen method. "HE" is the Howe method and is often viewed as being
extremely accurate, even for small sample sizes. "HE2" is a second
method due to Howe, which performs similarly to the Weissberg-Beatty
method, but is computationally simpler. "WBE" is the Weissberg-Beatty
method (also called the Wald-Wolfowitz method), which performs
similarly to the first Howe method for larger sample sizes. "ELL" is
the Ellison correction to the Weissberg-Beatty method when f is
appreciably larger than n^2. A warning message is displayed if f is
not larger than n^2. "KM" is the Krishnamoorthy-Mathew approximation
to the exact solution, which works well for larger sample sizes.
"EXACT" computes the k-factor exactly by finding the integral solution
to the problem via the integrate function. Note the computation time
of this method is largely determined by m. "OCT" is the Owen approach
to compute the k-factor when controlling the tails so that there is
not more than (1-P)/2 of the data in each tail of the distribution.
m : int, optional
The maximum number of subintervals to be used in the integrate
function. This is necessary only for method = "EXACT" and method =
"OCT". The larger the number, the more accurate the solution. Too low
of a value can result in an error. A large value can also cause the
function to be slow for method = "EXACT".
    loggamma : bool, optional
        If True, then the data is considered to be from a log-gamma
        distribution, in which case the output gives tolerance intervals for
        the log-gamma distribution. The default is False.
Returns
-------
gamtolint returns a dataframe with items:
alpha:
The specified significance level.
P:
The proportion of the population covered by this tolerance
interval.
1-sided.lower:
The 1-sided lower tolerance bound. This is given
only if side = 1.
1-sided.upper:
The 1-sided upper tolerance bound. This is given
only if side = 1.
2-sided.lower:
The 2-sided lower tolerance bound. This is given
only if side = 2.
2-sided.upper:
The 2-sided upper tolerance bound. This is given
only if side = 2.
Details
-------
Recall that if the random variable X is distributed according to a
log-gamma distribution, then the random variable Y = ln(X) is distributed
according to a gamma distribution.
Note
----
    R uses the Newton minimization method, whereas this code uses BFGS
    minimization. Both are nonlinear minimization methods, so their results
    show a minor discrepancy.
References
----------
Derek S. Young (2010). tolerance: An R Package for Estimating Tolerance
Intervals. Journal of Statistical Software, 36(5), 1-39.
URL http://www.jstatsoft.org/v36/i05/.
Krishnamoorthy, K., Mathew, T., and Mukherjee, S. (2008), Normal-Based
Methods for a Gamma Distribution: Prediction and Tolerance Intervals
and Stress-Strength Reliability, Technometrics, 50, 69–78.
Examples
--------
    ## 99%/99% 1-sided gamma tolerance intervals for a sample of size 50
    ## (the shape and scale below are illustrative values).
    x = np.random.gamma(shape = 2, scale = 1, size = 50)
    gamtolint(x = x, alpha = 0.01, P = 0.99, side = 1, method = "HE")
'''
if side != 1 and side != 2:
return 'Must specify a one-sided or two-sided procedure'
    if method == 'ELL':
        return 'The ELL method is not supported here; use HE, HE2, WBE, KM, EXACT, or OCT'
if loggamma:
x = np.log(x)
n = len(x)
xbar = np.mean(x)
    x2bar = np.mean(np.array(x)**2)
inits = [xbar**2/(x2bar-xbar**2),(x2bar-xbar**2)/xbar]
x = np.array(x)
def gammall(pars,x):
        return sum(-scipy.stats.gamma.logpdf(x, a=pars[0], scale=pars[1]))
out = (opt.minimize(gammall,x0 = inits, args = (x),method = 'BFGS')['x'])
ahat = out[0]
bhat = out[1]
x = x**(1/3)
xbar = bhat**(1/3) * (np.exp(math.lgamma(ahat + (1/3))-math.lgamma(ahat)))
s = np.sqrt(bhat**(2/3) * (np.exp(math.lgamma(ahat + (2/3))-math.lgamma(ahat))) - (xbar)**2)
K = Kfactor(n = n, alpha = alpha, P = P, side = side, method = method, m = m)
lower = max(0,(xbar-s*K)**3)
upper = (xbar+s*K)**3
if loggamma:
lower = np.exp(lower)
upper = np.exp(upper)
xbar = np.exp(xbar)
if side == 1:
return pd.DataFrame({"alpha":[alpha], "P":[P], "1-sided.lower":lower, "1-sided.upper":upper})
else:
return (pd.DataFrame({"alpha":[alpha], "P":[P], "2-sided.lower":lower, "2-sided.upper":upper}))
#x = [1,2,3,4,5,6,7,8,9,9,8,4,6,2,1,6,8,4,3,2,4,6,8,4,2,4,6,7,8,2,3,5,7,5,2,3,4,0]
#x = [1,2]
# x = [6, 2, 1, 4, 8, 3, 3, 14, 2, 1, 21, 5, 18, 2, 30, 10, 8, 2,
# 11, 4, 16, 13, 17, 1, 7, 1, 1, 28, 19, 27, 2, 7, 7, 13, 1,
# 15, 1, 16, 9, 9, 7, 29, 3, 10, 3, 1, 20, 8, 12, 6, 11, 5, 1,
# 5, 23, 3, 3, 14, 6, 9, 1, 24, 5, 11, 15, 1, 5, 5, 4, 10, 1,
# 12, 1, 3, 4, 2, 9, 2, 1, 25, 6, 8, 2, 1, 1, 1, 4, 6, 7, 26,
# 10, 2, 1, 2, 17, 4, 3, 22, 8, 2,12,1,1,1,1,1,1,1,1,1,2,1,1,1,1,
# 2,2,2,2,2,3,2,2,2,1,1,1,1,1,1,1,12,1,1,1,1,1,1,2,2,2,2,2,2,3,1,1,1,1,1,
# 2,2,2,2,2,2,2,2,2,2,22,4,1,1,1,1,11,1,1,1,1,1,1,1,1]
# print(gamtolint(x, side = 1, method = 'HE'))
# print(gamtolint(x, side = 2, method = 'HE'))
# print(gamtolint(x, side = 1, method = 'HE2'))
# print(gamtolint(x, side = 2, method = 'HE2'))
# print(gamtolint(x, side = 1, method = 'WBE'))
# print(gamtolint(x, side = 2, method = 'WBE'))
# print(gamtolint(x, side = 1, method = 'ELL'))
# print(gamtolint(x, side = 2, method = 'ELL'))
# print(gamtolint(x, side = 1, method = 'KM'))
# print(gamtolint(x, side = 2, method = 'KM'))
# print(gamtolint(x, side = 1, method = 'EXACT'))
# print(gamtolint(x, side = 2, method = 'EXACT'))
# print(gamtolint(x, side = 1, method = 'OCT'))
# print(gamtolint(x, side = 2, method = 'OCT'))
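# A self-contained usage sketch, guarded so it only runs when the module is
# executed directly. The gamma shape/scale and the random seed are illustrative
# assumptions, not values from the original source:
if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.gamma(shape=2.0, scale=3.0, size=50)
    # 95%/99% one-sided and two-sided tolerance intervals via the Howe method
    print(gamtolint(x, alpha=0.05, P=0.99, side=1, method='HE'))
    print(gamtolint(x, alpha=0.05, P=0.99, side=2, method='HE'))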