@@ -134,6 +134,7 @@ let s:NODE_ENV = 88
134
134
let s: NODE_REG = 89
135
135
let s: NODE_CURLYNAMEPART = 90
136
136
let s: NODE_CURLYNAMEEXPR = 91
137
+ let s: NODE_LAMBDA = 92
137
138
138
139
let s: TOKEN_EOF = 1
139
140
let s: TOKEN_EOL = 2
@@ -199,6 +200,7 @@ let s:TOKEN_SEMICOLON = 61
199
200
let s: TOKEN_BACKTICK = 62
200
201
let s: TOKEN_DOTDOTDOT = 63
201
202
let s: TOKEN_SHARP = 64
203
+ let s: TOKEN_ARROW = 65
202
204
203
205
let s: MAX_FUNC_ARGS = 20
204
206
@@ -398,6 +400,7 @@ endfunction
398
400
" REG .value
399
401
" CURLYNAMEPART .value
400
402
" CURLYNAMEEXPR .value
403
+ " LAMBDA .rlist .left
401
404
" Allocate a fresh AST node dictionary carrying only its type tag;
" callers attach the remaining fields (.pos, .value, .rlist, ...) per
" the attribute table documented above.
function! s:Node(type)
  let node = {}
  let node.type = a:type
  return node
endfunction
@@ -2568,8 +2571,13 @@ function! s:ExprTokenizer.get2()
2568
2571
call r .seek_cur (1 )
2569
2572
return self .token (s: TOKEN_PLUS , ' +' , pos)
2570
2573
elseif c == # ' -'
2571
- call r .seek_cur (1 )
2572
- return self .token (s: TOKEN_MINUS , ' -' , pos)
2574
+ if r .p (1 ) == # ' >'
2575
+ call r .seek_cur (2 )
2576
+ return self .token (s: TOKEN_ARROW , ' ->' , pos)
2577
+ else
2578
+ call r .seek_cur (1 )
2579
+ return self .token (s: TOKEN_MINUS , ' -' , pos)
2580
+ endif
2573
2581
elseif c == # ' .'
2574
2582
if r .p (1 ) == # ' .' && r .p (2 ) == # ' .'
2575
2583
call r .seek_cur (3 )
@@ -3200,6 +3208,7 @@ endfunction
3200
3208
" 'string'
3201
3209
" [expr1, ...]
3202
3210
" {expr1: expr1, ...}
3211
+ " {args -> expr1}
3203
3212
" &option
3204
3213
" (expr1)
3205
3214
" variable
@@ -3251,42 +3260,121 @@ function! s:ExprParser.parse_expr9()
3251
3260
endwhile
3252
3261
endif
3253
3262
elseif token.type == s: TOKEN_COPEN
3254
- let node = s: Node (s: NODE_DICT )
3255
- let node.pos = token.pos
3256
- let node.value = []
3257
- let token = self .tokenizer.peek ()
3258
- if token.type == s: TOKEN_CCLOSE
3259
- call self .tokenizer.get ()
3260
- else
3263
+ let savepos = self .reader.tell ()
3264
+ let nodepos = token.pos
3265
+ let token = self .tokenizer.get ()
3266
+ let lambda = token.type == s: TOKEN_ARROW
3267
+ if ! lambda && ! (token.type == s: TOKEN_SQUOTE || token.type == s: TOKEN_DQUOTE )
3268
+ " if the token type is string, we cannot peek the next token and we can
3269
+ " assume it's not a lambda.
3270
+ let token2 = self .tokenizer.peek ()
3271
+ let lambda = token2.type == s: TOKEN_ARROW || token2.type == s: TOKEN_COMMA
3272
+ endif
3273
+ " fallback to dict or {expr} if true
3274
+ let fallback = 0
3275
+ if lambda
3276
+ " lambda {token,...} {->...} {token->...}
3277
+ let node = s: Node (s: NODE_LAMBDA )
3278
+ let node.pos = nodepos
3279
+ let node.rlist = []
3280
+ let named = {}
3261
3281
while 1
3262
- let key = self .parse_expr1 ()
3263
- let token = self .tokenizer.get ()
3264
- if token.type == s: TOKEN_CCLOSE
3265
- if ! empty (node.value)
3266
- throw s: Err (printf (' unexpected token: %s' , token.value), token.pos)
3267
- endif
3268
- call self .reader.seek_set (pos)
3269
- let node = self .parse_identifier ()
3282
+ if token.type == s: TOKEN_ARROW
3270
3283
break
3271
- endif
3272
- if token.type != s: TOKEN_COLON
3273
- throw s: Err (printf (' unexpected token: %s' , token.value), token.pos)
3274
- endif
3275
- let val = self .parse_expr1 ()
3276
- call add (node.value, [key , val])
3277
- let token = self .tokenizer.get ()
3278
- if token.type == s: TOKEN_COMMA
3279
- if self .tokenizer.peek ().type == s: TOKEN_CCLOSE
3284
+ elseif token.type == s: TOKEN_IDENTIFIER
3285
+ if ! s: isargname (token.value)
3286
+ throw s: Err (printf (' E125: Illegal argument: %s' , token.value), token.pos)
3287
+ elseif has_key (named, token.value)
3288
+ throw s: Err (printf (' E853: Duplicate argument name: %s' , token.value), token.pos)
3289
+ endif
3290
+ let named[token.value] = 1
3291
+ let varnode = s: Node (s: NODE_IDENTIFIER )
3292
+ let varnode.pos = token.pos
3293
+ let varnode.value = token.value
3294
+ " XXX: Vim doesn't skip white space before comma. {a ,b -> ...} => E475
3295
+ if s: iswhite (self .reader.p (0 )) && self .tokenizer.peek ().type == s: TOKEN_COMMA
3296
+ throw s: Err (' E475: Invalid argument: White space is not allowed before comma' , self .reader.getpos ())
3297
+ endif
3298
+ let token = self .tokenizer.get ()
3299
+ call add (node.rlist, varnode)
3300
+ if token.type == s: TOKEN_COMMA
3301
+ " XXX: Vim allows last comma. {a, b, -> ...} => OK
3302
+ let token = self .tokenizer.peek ()
3303
+ if token.type == s: TOKEN_ARROW
3304
+ call self .tokenizer.get ()
3305
+ break
3306
+ endif
3307
+ elseif token.type == s: TOKEN_ARROW
3308
+ break
3309
+ else
3310
+ throw s: Err (printf (' unexpected token: %s, type: %d' , token.value, token.type ), token.pos)
3311
+ endif
3312
+ elseif token.type == s: TOKEN_DOTDOTDOT
3313
+ let varnode = s: Node (s: NODE_IDENTIFIER )
3314
+ let varnode.pos = token.pos
3315
+ let varnode.value = token.value
3316
+ call add (node.rlist, varnode)
3317
+ let token = self .tokenizer.peek ()
3318
+ if token.type == s: TOKEN_ARROW
3280
3319
call self .tokenizer.get ()
3281
3320
break
3321
+ else
3322
+ throw s: Err (printf (' unexpected token: %s' , token.value), token.pos)
3282
3323
endif
3283
- elseif token.type == s: TOKEN_CCLOSE
3284
- break
3285
3324
else
3286
- throw s: Err (printf (' unexpected token: %s' , token.value), token.pos)
3325
+ let fallback = 1
3326
+ break
3287
3327
endif
3328
+ let token = self .tokenizer.get ()
3288
3329
endwhile
3330
+ if ! fallback
3331
+ let node.left = self .parse_expr1 ()
3332
+ let token = self .tokenizer.get ()
3333
+ if token.type != s: TOKEN_CCLOSE
3334
+ throw s: Err (printf (' unexpected token: %s' , token.value), token.pos)
3335
+ endif
3336
+ return node
3337
+ endif
3289
3338
endif
3339
+ " dict
3340
+ let node = s: Node (s: NODE_DICT )
3341
+ let node.pos = nodepos
3342
+ let node.value = []
3343
+ call self .reader.seek_set (savepos)
3344
+ let token = self .tokenizer.peek ()
3345
+ if token.type == s: TOKEN_CCLOSE
3346
+ call self .tokenizer.get ()
3347
+ return node
3348
+ endif
3349
+ while 1
3350
+ let key = self .parse_expr1 ()
3351
+ let token = self .tokenizer.get ()
3352
+ if token.type == s: TOKEN_CCLOSE
3353
+ if ! empty (node.value)
3354
+ throw s: Err (printf (' unexpected token: %s' , token.value), token.pos)
3355
+ endif
3356
+ call self .reader.seek_set (pos)
3357
+ let node = self .parse_identifier ()
3358
+ break
3359
+ endif
3360
+ if token.type != s: TOKEN_COLON
3361
+ throw s: Err (printf (' unexpected token: %s' , token.value), token.pos)
3362
+ endif
3363
+ let val = self .parse_expr1 ()
3364
+ call add (node.value, [key , val])
3365
+ let token = self .tokenizer.get ()
3366
+ if token.type == s: TOKEN_COMMA
3367
+ if self .tokenizer.peek ().type == s: TOKEN_CCLOSE
3368
+ call self .tokenizer.get ()
3369
+ break
3370
+ endif
3371
+ elseif token.type == s: TOKEN_CCLOSE
3372
+ break
3373
+ else
3374
+ throw s: Err (printf (' unexpected token: %s' , token.value), token.pos)
3375
+ endif
3376
+ endwhile
3377
+ return node
3290
3378
elseif token.type == s: TOKEN_POPEN
3291
3379
let node = self .parse_expr1 ()
3292
3380
let token = self .tokenizer.get ()
@@ -3955,6 +4043,8 @@ function! s:Compiler.compile(node)
3955
4043
return self .compile_curlynamepart (a: node )
3956
4044
elseif a: node .type == s: NODE_CURLYNAMEEXPR
3957
4045
return self .compile_curlynameexpr (a: node )
4046
+ elseif a: node .type == s: NODE_LAMBDA
4047
+ return self .compile_lambda (a: node )
3958
4048
else
3959
4049
throw printf (' Compiler: unknown node: %s' , string (a: node ))
3960
4050
endif
@@ -4410,6 +4500,11 @@ function! s:Compiler.compile_curlynameexpr(node)
4410
4500
return ' {' . self .compile (a: node .value) . ' }'
4411
4501
endfunction
4412
4502
4503
" Compile a LAMBDA node to S-expression form: (lambda (arg ...) body).
" NOTE: map() rewrites a:node.rlist in place (AST nodes -> compiled
" strings), matching the convention of the other compile_* functions.
function! s:Compiler.compile_lambda(node)
  let arglist = join(map(a:node.rlist, 'self.compile(v:val)'), ' ')
  return printf('(lambda (%s) %s)', arglist, self.compile(a:node.left))
endfunction
4507
+
4413
4508
" TODO: under construction
4414
4509
let s: RegexpParser = {}
4415
4510
0 commit comments