
Commit 37c4b82

Merge branch 'main' into bayes_nonconj
2 parents ac0ef48 + bbe71da commit 37c4b82

22 files changed (+2025 / -1633 lines)

environment-cn.yml

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ dependencies:
 - sphinxext-rediraffe==0.2.7
 - sphinx_reredirects==0.1.4
 - sphinx-exercise==1.0.1
-- sphinx-proof==0.2.1
+- sphinx-proof==0.3.0
 - ghp-import==1.1.0
 - sphinxcontrib-youtube==1.4.1 #Version 1.3.0 is required as quantecon-book-theme is only compatible with sphinx<=5
 - sphinx-togglebutton==0.3.2

environment.yml

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@ dependencies:
 - sphinx-tojupyter==0.3.1
 - sphinxext-rediraffe==0.2.7
 - sphinx-exercise==1.0.1
-- sphinx-proof==0.2.1
+- sphinx-proof==0.3.0
 - sphinxcontrib-youtube==1.4.1
 - sphinx-togglebutton==0.3.2
 - sphinx-reredirects==0.1.4

lectures/_config.yml

Lines changed: 1 addition & 1 deletion
@@ -34,6 +34,7 @@ latex:
 sphinx:
   extra_extensions: [sphinx_multitoc_numbering, sphinxext.rediraffe, sphinx_tojupyter, sphinxcontrib.youtube, sphinx.ext.todo, sphinx_exercise, sphinx_proof, sphinx_togglebutton, sphinx.ext.intersphinx, sphinx_reredirects]
   config:
+    language: zh_CN
     # false-positive links
     linkcheck_ignore: ['https://online.stat.psu.edu/stat415/book/export/html/834']
     bibtex_reference_style: author_year
@@ -90,7 +91,6 @@ sphinx:
     google_analytics_id: G-J0SMYR4SG3
     launch_buttons:
       colab_url : https://colab.research.google.com
-    intersphinx_mapping:
     intersphinx_mapping:
       intermediate:
         - https://python.quantecon.org/
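Note: keys under sphinx: config: in a Jupyter Book _config.yml are forwarded to the generated Sphinx conf.py, so the added line is equivalent to setting Sphinx's language option for the Chinese build. A minimal conf.py-style sketch of what the block above amounts to (the two neighbouring values are copied from the hunk purely for illustration):

# Hypothetical conf.py fragment mirroring the sphinx.config block above.
# `language` switches Sphinx's built-in UI strings (search page, admonition
# titles, etc.) to the given locale.
language = "zh_CN"   # the line added in this commit
linkcheck_ignore = ['https://online.stat.psu.edu/stat415/book/export/html/834']
bibtex_reference_style = "author_year"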
Lines changed: 6 additions & 6 deletions
@@ -1,11 +1,11 @@
-def solve_model_time_iter(model,    # Class with model information
-                          σ,        # Initial condition
+def solve_model_time_iter(model,    # 含有模型信息的类
+                          σ,        # 初始条件
                           tol=1e-4,
                           max_iter=1000,
                           verbose=True,
                           print_skip=25):

-    # Set up loop
+    # 设置迭代循环
     i = 0
     error = tol + 1

@@ -14,12 +14,12 @@ def solve_model_time_iter(model,    # Class with model information
         error = np.max(np.abs(σ - σ_new))
         i += 1
         if verbose and i % print_skip == 0:
-            print(f"Error at iteration {i} is {error}.")
+            print(f"{i} 次迭代的误差为 {error}")
         σ = σ_new

     if error > tol:
-        print("Failed to converge!")
+        print("未能收敛!")
     elif verbose:
-        print(f"\nConverged in {i} iterations.")
+        print(f"\n{i} 次迭代后收敛。")

     return σ_new
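Note: this helper iterates on the policy σ until the sup-norm change falls below tol. A usage sketch only, under stated assumptions: the model class name (OptimalGrowthModel, presumably the jitclass from ogm.py further down) and the choice of initial policy are assumptions, since neither appears in this hunk.

import numpy as np

og = OptimalGrowthModel()        # assumed: the jitclass defined in ogm.py below
σ_init = np.copy(og.grid)        # assumed: start from the policy σ(y) = y
σ_approx = solve_model_time_iter(og, σ_init, verbose=True)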
Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,7 @@

 def v_star(y, α, β, μ):
     """
-    True value function
+    真实价值函数
     """
     c1 = np.log(1 - α * β) / (1 - β)
     c2 = (μ + α * np.log(α * β)) / (1 - α)
@@ -11,7 +11,7 @@ def v_star(y, α, β, μ):

 def σ_star(y, α, β):
     """
-    True optimal policy
+    真实最优策略
     """
     return (1 - α * β) * y

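Note: these helpers encode the known closed-form solution of the log-utility optimal growth model. From the lines visible in the hunks, the exact policy and the two constants are

    \sigma^*(y) = (1 - \alpha\beta)\, y, \qquad
    c_1 = \frac{\ln(1 - \alpha\beta)}{1 - \beta}, \qquad
    c_2 = \frac{\mu + \alpha \ln(\alpha\beta)}{1 - \alpha},

while the full expression for v^*(y) sits in lines of the file that this diff does not show.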
lectures/_static/lecture_specific/optgrowth/solve_model.py

Lines changed: 6 additions & 6 deletions
@@ -4,12 +4,12 @@ def solve_model(og,
                 verbose=True,
                 print_skip=25):
     """
-    Solve model by iterating with the Bellman operator.
+    通过迭代贝尔曼算子求解

     """

-    # Set up loop
-    v = og.u(og.grid)  # Initial condition
+    # 设置迭代循环
+    v = og.u(og.grid)  # 初始条件
     i = 0
     error = tol + 1

@@ -18,12 +18,12 @@ def solve_model(og,
         error = np.max(np.abs(v - v_new))
         i += 1
         if verbose and i % print_skip == 0:
-            print(f"Error at iteration {i} is {error}.")
+            print(f"{i} 次迭代的误差为 {error}")
         v = v_new

     if error > tol:
-        print("Failed to converge!")
+        print("未能收敛!")
     elif verbose:
-        print(f"\nConverged in {i} iterations.")
+        print(f"\n{i} 次迭代后收敛。")

     return v_greedy, v_new
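Note: like the time-iteration helper above, solve_model loops until the value function stops changing and then returns a greedy policy together with the approximate value function. A usage sketch only; OptimalGrowthModel is the assumed name of the jitclass defined in ogm.py below.

og = OptimalGrowthModel()                  # assumed model instance
v_greedy, v_solution = solve_model(og)     # greedy policy and value function on og.grid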

lectures/_static/lecture_specific/optgrowth_fast/ogm.py

Lines changed: 13 additions & 13 deletions
@@ -2,12 +2,12 @@
 from numba.experimental import jitclass

 opt_growth_data = [
-    ('α', float64),          # Production parameter
-    ('β', float64),          # Discount factor
-    ('μ', float64),          # Shock location parameter
-    ('s', float64),          # Shock scale parameter
-    ('grid', float64[:]),    # Grid (array)
-    ('shocks', float64[:])   # Shock draws (array)
+    ('α', float64),          # 生产参数
+    ('β', float64),          # 折现因子
+    ('μ', float64),          # 冲击的均值参数
+    ('s', float64),          # 冲击的尺度参数
+    ('grid', float64[:]),    # 网格(数组)
+    ('shocks', float64[:])   # 冲击样本(数组)
 ]

 @jitclass(opt_growth_data)
@@ -25,32 +25,32 @@ def __init__(self,

         self.α, self.β, self.μ, self.s = α, β, μ, s

-        # Set up grid
+        # 设置网格
         self.grid = np.linspace(1e-5, grid_max, grid_size)

-        # Store shocks (with a seed, so results are reproducible)
+        # 存储冲击(设置随机种子以确保结果可重复)
         np.random.seed(seed)
         self.shocks = np.exp(μ + s * np.random.randn(shock_size))


     def f(self, k):
-        "The production function"
+        "生产函数"
         return k**self.α


     def u(self, c):
-        "The utility function"
+        "效用函数"
         return np.log(c)

     def f_prime(self, k):
-        "Derivative of f"
+        "生产函数的一阶导数"
         return self.α * (k**(self.α - 1))


     def u_prime(self, c):
-        "Derivative of u"
+        "效用函数的一阶导数"
         return 1/c

     def u_prime_inv(self, c):
-        "Inverse of u'"
+        "效用函数一阶导数的反函数"
         return 1/c
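Note: in equations, the methods shown in this hunk implement Cobb-Douglas production, log utility, and lognormal shocks, matching the translated comments:

    f(k) = k^{\alpha}, \qquad u(c) = \ln c, \qquad
    \xi = \exp(\mu + s\,\zeta), \quad \zeta \sim N(0, 1).

Since u'(c) = 1/c is its own inverse, u_prime and u_prime_inv deliberately share the same body.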

lectures/_static/lecture_specific/optgrowth_fast/ogm_crra.py

Lines changed: 15 additions & 15 deletions
@@ -2,13 +2,13 @@
 from numba.experimental import jitclass

 opt_growth_data = [
-    ('α', float64),          # Production parameter
-    ('β', float64),          # Discount factor
-    ('μ', float64),          # Shock location parameter
-    ('γ', float64),          # Preference parameter
-    ('s', float64),          # Shock scale parameter
-    ('grid', float64[:]),    # Grid (array)
-    ('shocks', float64[:])   # Shock draws (array)
+    ('α', float64),          # 生产参数
+    ('β', float64),          # 折现因子
+    ('μ', float64),          # 冲击的均值参数
+    ('γ', float64),          # 偏好参数
+    ('s', float64),          # 冲击的尺度参数
+    ('grid', float64[:]),    # 网格(数组)
+    ('shocks', float64[:])   # 冲击样本(数组)
 ]

 @jitclass(opt_growth_data)
@@ -27,29 +27,29 @@ def __init__(self,

         self.α, self.β, self.γ, self.μ, self.s = α, β, γ, μ, s

-        # Set up grid
+        # 设置网格
         self.grid = np.linspace(1e-5, grid_max, grid_size)

-        # Store shocks (with a seed, so results are reproducible)
+        # 存储冲击(设置随机种子以确保结果可重复)
         np.random.seed(seed)
         self.shocks = np.exp(μ + s * np.random.randn(shock_size))

-
     def f(self, k):
-        "The production function."
+        "生产函数"
         return k**self.α

     def u(self, c):
-        "The utility function."
+        "效用函数"
         return c**(1 - self.γ) / (1 - self.γ)

     def f_prime(self, k):
-        "Derivative of f."
+        "生产函数的一阶导数"
         return self.α * (k**(self.α - 1))

     def u_prime(self, c):
-        "Derivative of u."
+        "效用函数的一阶导数"
         return c**(-self.γ)

-    def u_prime_inv(c):
+    def u_prime_inv(self, c):
+        "效用函数一阶导数的反函数"
         return c**(-1 / self.γ)
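Note: besides the translated docstrings, this hunk fixes a bug: u_prime_inv previously omitted self, so calling it on an instance would fail (the instance itself would be passed as c, and self.γ would be undefined inside the body). A minimal sketch in plain Python (no numba) of the corrected behaviour, with an illustrative γ:

class CRRADemo:
    # Stripped-down stand-in for the jitclass above, for illustration only.
    def __init__(self, γ=2.0):
        self.γ = γ

    def u_prime(self, c):
        # marginal utility u'(c) = c**(-γ)
        return c**(-self.γ)

    def u_prime_inv(self, c):
        # with self in place, og.u_prime_inv(x) correctly binds x to c
        return c**(-1 / self.γ)

og = CRRADemo()
print(og.u_prime_inv(og.u_prime(2.0)))   # prints 2.0: inverting u' round-trips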
