Skip to content

Commit 42be126

Browse files
committed
Update docs
1 parent f603544 commit 42be126

File tree

1 file changed

+10
-6
lines changed

1 file changed

+10
-6
lines changed

deepxde/gradients.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,8 @@ def __call__(self, ys, xs, i=0, j=None):
9797
def jacobian(ys, xs, i=0, j=None):
9898
"""Compute Jacobian matrix J: J[i][j] = dy_i/dx_j, where i=0,...,dim_y-1 and j=0,...,dim_x-1.
9999
100+
Use this function to compute first-order derivatives instead of ``tf.gradients()``, because
101+
100102
- It uses lazy evaluation, i.e., it only computes J[i][j] when needed.
101103
- It will remember the gradients that have already been computed to avoid duplicate computation.
102104
@@ -135,27 +137,29 @@ def __call__(self, y, xs, component=None, i=0, j=0, grad_y=None):
135137
return self.Hs[key](i, j)
136138

137139

138-
def hessian(y, xs, component=None, i=0, j=0, grad_y=None):
140+
def hessian(ys, xs, component=None, i=0, j=0, grad_y=None):
139141
"""Compute Hessian matrix H: H[i][j] = d^2y / (dx_i dx_j), where i,j = 0,...,dim_x-1.
140142
143+
Use this function to compute second-order derivatives instead of ``tf.gradients()``, because
144+
141145
- It uses lazy evaluation, i.e., it only computes H[i][j] when needed.
142146
- It will remember the gradients that have already been computed to avoid duplicate computation.
143147
144148
Args:
145-
y: Output Tensor of shape (batch_size, 1) or (batch_size, dim_y > 1).
149+
ys: Output Tensor of shape (batch_size, dim_y).
146150
xs: Input Tensor of shape (batch_size, dim_x).
147-
component: If `y` has the shape (batch_size, dim_y > 1), then `y[:, component]` is used to compute the
148-
Hessian. Do not use if `y` has the shape (batch_size, 1).
151+
component: If dim_y > 1, then `ys[:, component]` is used as y to compute the Hessian. If dim_y = 1, `component`
152+
must be ``None``.
149153
i (int):
150154
j (int):
151-
grad_y: The gradient of `y` w.r.t. `xs`. Provide `grad_y` if known to avoid duplicate computation. `grad_y` can
155+
grad_y: The gradient of y w.r.t. `xs`. Provide `grad_y` if known to avoid duplicate computation. `grad_y` can
152156
be computed from ``jacobian``. Even if you do not provide `grad_y`, there is no duplicate computation if
153157
you use ``jacobian`` to compute first-order derivatives.
154158
155159
Returns:
156160
H[`i`][`j`].
157161
"""
158-
return hessian._fn(y, xs, component=component, i=i, j=j, grad_y=grad_y)
162+
return hessian._fn(ys, xs, component=component, i=i, j=j, grad_y=grad_y)
159163

160164

161165
hessian._fn = Hessians()

0 commit comments

Comments
 (0)