@@ -5,6 +5,10 @@ function update!(opt::AbstractOptimiser, x::AbstractArray, x̄)
 end
 
 function update!(opt::AbstractOptimiser, xs::Params, gs)
+  @warn """The method `Flux.update!(optimiser, ps::Params, grads)` is deprecated,
+    as part of Flux's move away from Zygote's implicit mode.
+    Please use explicit-style `update!(opt_state, model, grad)` instead,
+    where `grad = Flux.gradient(m -> loss(m,x,y), model)` and `opt_state = Flux.setup(rule, model)`.""" maxlog=1
   for x in xs
     isnothing(gs[x]) && continue
     update!(opt, x, gs[x])
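For context, a minimal sketch of the explicit-style replacement that this warning points to. The names `model`, `loss`, `x`, `y`, and the `Dense`/`Adam` choices are hypothetical illustrations, not part of this diff; `Flux.setup`, `Flux.gradient`, and the three-argument `Flux.update!` are the API the warning names.

using Flux

# Hypothetical model and data, for illustration only.
model = Dense(2 => 1)
x, y = rand(Float32, 2, 8), rand(Float32, 1, 8)
loss(m, x, y) = Flux.Losses.mse(m(x), y)

opt_state = Flux.setup(Adam(0.01), model)           # replaces `opt = Adam(); ps = Params(model)`
grad = Flux.gradient(m -> loss(m, x, y), model)[1]  # explicit gradient w.r.t. the model itself
Flux.update!(opt_state, model, grad)                # replaces implicit `update!(opt, ps, gs)`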
@@ -21,6 +25,10 @@ batchmemaybe(x) = tuple(x)
 batchmemaybe(x::Tuple) = x
 
 function train!(loss, ps::Params, data, opt::AbstractOptimiser; cb = () -> ())
+  @warn """The method `Flux.train!(loss2, ps::Params, data, optimiser)` is deprecated,
+    as part of Flux's move away from Zygote's implicit parameters.
+    Please use explicit-style `train!(loss, model, data, opt_state)` instead,
+    where `loss(m, xy...)` accepts the model, and `opt_state = Flux.setup(rule, model)`.""" maxlog=1
   cb = runall(cb)
   itrsz = Base.IteratorSize(typeof(data))
   n = (itrsz == Base.HasLength()) || (itrsz == Base.HasShape{1}()) ? length(data) : 0
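Similarly, a hedged sketch of the explicit-style `train!` call the second warning describes, continuing the hypothetical `model`, `loss`, `x`, and `y` from the sketch above:

# An iterable of (input, target) batches; here a single hypothetical batch.
data = [(x, y)]
opt_state = Flux.setup(Adam(0.01), model)
# `loss(m, x, y)` now takes the model as its first argument,
# so train! can call it as `loss(model, batch...)` for each batch.
Flux.train!(loss, model, data, opt_state)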