#!/usr/bin/env python
"""PIN model estimation -- one year at a time, parallelized across permnos."""
import argparse
import json
import os
import sys
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path
import pandas as pd


def parse_args():
    parser = argparse.ArgumentParser(description='Estimate PIN models.')
    parser.add_argument('model', choices=['gpin', 'owr'],
                        help='Model to estimate')
    parser.add_argument('year', type=int, help='Year to estimate')
    parser.add_argument('--starts', type=int, default=10,
                        help='Random starts (default: 10)')
    parser.add_argument('--workers', type=int, default=2,
                        help='Number of workers (default: 2, for hyperthreads)')
    parser.add_argument('--data', type=str,
                        default='/scratch/nyu/hue/taqdfx_all6.h5',
                        help='HDF5 data path')
    parser.add_argument('--chunk', type=int, default=None,
                        help='Chunk index (0-based). If set, only estimate this chunk.')
    parser.add_argument('--nchunks', type=int, default=8,
                        help='Total number of chunks per year (default: 8)')
    return parser.parse_args()

def load_warm_start(model, permno, year):
    """Load prior estimate as initial parameter values for warm-starting."""
    fname = f'{model}/{permno}-{year}.json'
    if os.path.exists(fname):
        with open(fname) as f:
            prior = json.load(f)
        param_names = {
            'gpin': ['a', 'p', 'eta', 'r', 'd', 'th'],
            'owr': ['a', 'su', 'sz', 'si', 'spd', 'spo'],
        }
        return {k: prior[k] for k in param_names[model] if k in prior}
    return {}
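
# Illustrative warm-start file (hypothetical path and values), e.g.
# gpin/10107-2011.json:
#   {"a": 0.4, "p": 0.5, "eta": 0.1, "r": 2.0, "d": 0.3, "th": 0.6}
# Keys not listed in param_names are simply dropped by the filter above.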

def estimate_one(args):
    """Estimate a single permno. Called by ProcessPoolExecutor."""
    permno, d, model_name, year, starts = args
    d = d.dropna()
    if len(d) < 30:  # skip stocks with too few observations
        return None
    # Load warm start if available
    warm = load_warm_start(model_name, permno, year)
    try:
        # Estimate
        if model_name == 'gpin':
            import gpin_model as gpin
            r = gpin.fit(d.n_buys, d.n_sells, starts=starts, **warm)
        elif model_name == 'owr':
            import owr_model as owr
            r = owr.fit(d.uy_e, d.ur_d, d.ur_o, starts=starts, **warm)
    except Exception as e:
        print(f'[ERROR] {model_name} permno={permno} yyyy={year}: {e}',
              file=sys.stderr, flush=True)
        return None
    r.update({'permno': int(permno), 'yyyy': int(year)})
    return r
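
# Each result row is the model's fitted-parameter dict plus identifiers,
# e.g. (illustrative): {"a": 0.38, ..., "permno": 10107, "yyyy": 2012}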

def main():
    args = parse_args()
    # Output path -- include chunk suffix if chunking
    if args.chunk is not None:
        outfile = Path(f'{args.model}-{args.year}-c{args.chunk}.jsonl')
    else:
        outfile = Path(f'{args.model}-{args.year}.jsonl')
    # Read all data for this year in one shot (avoids repeated NFS opens).
    # .copy() forces data into regular memory -- releases the HDF5 mmap so
    # forked workers don't inherit stale memory mappings (causes bus errors).
    print(f'[LOAD] Reading {args.data} for year {args.year}...',
          flush=True)
    df = pd.read_hdf(args.data, key='data', where=f'yyyy=={args.year}').copy()
    all_permnos = sorted(df.permno.unique())
    # Select chunk if specified
    if args.chunk is not None:
        chunk_size = (len(all_permnos) + args.nchunks - 1) // args.nchunks
        start = args.chunk * chunk_size
        permnos = all_permnos[start:start + chunk_size]
        print(f'[CHUNK] {args.chunk}/{args.nchunks}: permnos {start}-{start+len(permnos)-1} '
              f'({len(permnos)} of {len(all_permnos)})', flush=True)
    else:
        permnos = all_permnos
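    # Ceiling division above ensures every permno lands in some chunk.
    # Illustrative: 6100 permnos, nchunks=8 -> chunk_size=763; chunk 7
    # starts at 5341 and gets only the remaining 759 permnos.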
    # Job-skip: bail out if the output already exists with the expected count
    if outfile.exists():
        with open(outfile) as f:
            existing = sum(1 for _ in f)
        if existing >= len(permnos):
            print(f'[SKIP] {outfile} already has {existing} rows '
                  f'(expected {len(permnos)})')
            return
    print(f'[START] {args.model} {args.year}: {len(permnos)} permnos, '
          f'{args.starts} starts, {args.workers} workers')
    # Build work items with pre-sliced data (no file I/O in workers);
    # hoist the set so it isn't rebuilt on every groupby iteration
    permno_set = set(permnos)
    grouped = {p: g for p, g in df.groupby('permno') if p in permno_set}
    work = [(p, grouped[p], args.model, args.year, args.starts)
            for p in permnos]
    # Run estimation in parallel (fork is fine -- .copy() above detached
    # the dataframe from the HDF5 mmap)
    results = []
    with ProcessPoolExecutor(max_workers=args.workers) as executor:
        for i, r in enumerate(executor.map(estimate_one, work,
                                           chunksize=4)):
            if r is not None:
                results.append(r)
            if (i + 1) % 100 == 0:
                print(f'[PROGRESS] {i + 1}/{len(work)} complete', flush=True)
    # Write JSONL output
    with open(outfile, 'w') as f:
        for r in results:
            f.write(json.dumps(r) + '\n')
    print(f'[DONE] {outfile}: {len(results)} estimates')


if __name__ == '__main__':
    main()
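
# To merge chunked outputs afterwards (illustrative, assuming the default
# naming scheme above):
#   cat gpin-2012-c*.jsonl > gpin-2012.jsonl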