Skip to content

Commit b2a1273

Browse files
committed
* Upgrade presets for PyTorch 2.6.0
1 parent 56229de commit b2a1273

File tree

1,536 files changed

+3066
-3199
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

1,536 files changed

+3066
-3199
lines changed

CHANGELOG.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11

22
* Introduce `macosx-arm64` builds for ARPACK-NG, CMINPACK, FFTW, GSL, TensorFlow Lite, ONNX, ONNX Runtime ([issue #1069](https://github.com/bytedeco/javacpp-presets/issues/1069))
3-
* Upgrade presets for OpenCV 4.11.0, DNNL 3.6.2, CPython 3.13.1, NumPy 2.2.1, SciPy 1.15.1, LLVM 19.1.6, ONNX Runtime 1.20.1
3+
* Upgrade presets for OpenCV 4.11.0, DNNL 3.6.2, CPython 3.13.1, NumPy 2.2.1, SciPy 1.15.1, LLVM 19.1.6, PyTorch 2.6.0, ONNX Runtime 1.20.1
44

55
### November 16, 2024 version 1.5.11
66
* Enable distributed package using Gloo in presets for PyTorch ([pull #1510](https://github.com/bytedeco/javacpp-presets/pull/1510))

README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -223,7 +223,7 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip
223223
* NVIDIA Video Codec SDK 12.2.x https://developer.nvidia.com/nvidia-video-codec-sdk
224224
* OpenCL 3.0.x https://github.com/KhronosGroup/OpenCL-ICD-Loader
225225
* MXNet 1.9.x https://github.com/apache/incubator-mxnet
226-
* PyTorch 2.5.x https://github.com/pytorch/pytorch
226+
* PyTorch 2.6.x https://github.com/pytorch/pytorch
227227
* SentencePiece 0.2.0 https://github.com/google/sentencepiece
228228
* TensorFlow 1.15.x https://github.com/tensorflow/tensorflow
229229
* TensorFlow Lite 2.18.x https://github.com/tensorflow/tensorflow

platform/pom.xml

+1-1
Original file line numberDiff line numberDiff line change
@@ -292,7 +292,7 @@
292292
<dependency>
293293
<groupId>org.bytedeco</groupId>
294294
<artifactId>pytorch-platform</artifactId>
295-
<version>2.5.1-${project.version}</version>
295+
<version>2.6.0-${project.version}</version>
296296
</dependency>
297297
<dependency>
298298
<groupId>org.bytedeco</groupId>

pytorch/README.md

+6-6
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ Introduction
99
------------
1010
This directory contains the JavaCPP Presets module for:
1111

12-
* PyTorch 2.5.1 https://pytorch.org/
12+
* PyTorch 2.6.0 https://pytorch.org/
1313

1414
Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.
1515

@@ -40,36 +40,36 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
4040
<modelVersion>4.0.0</modelVersion>
4141
<groupId>org.bytedeco.pytorch</groupId>
4242
<artifactId>simplemnist</artifactId>
43-
<version>1.5.11</version>
43+
<version>1.5.12-SNAPSHOT</version>
4444
<properties>
4545
<exec.mainClass>SimpleMNIST</exec.mainClass>
4646
</properties>
4747
<dependencies>
4848
<dependency>
4949
<groupId>org.bytedeco</groupId>
5050
<artifactId>pytorch-platform</artifactId>
51-
<version>2.5.1-1.5.11</version>
51+
<version>2.6.0-1.5.12-SNAPSHOT</version>
5252
</dependency>
5353

5454
<!-- Additional dependencies required to use CUDA, cuDNN, and NCCL -->
5555
<dependency>
5656
<groupId>org.bytedeco</groupId>
5757
<artifactId>pytorch-platform-gpu</artifactId>
58-
<version>2.5.1-1.5.11</version>
58+
<version>2.6.0-1.5.12-SNAPSHOT</version>
5959
</dependency>
6060

6161
<!-- Additional dependencies to use bundled CUDA, cuDNN, and NCCL -->
6262
<dependency>
6363
<groupId>org.bytedeco</groupId>
6464
<artifactId>cuda-platform-redist</artifactId>
65-
<version>12.6-9.5-1.5.11</version>
65+
<version>12.6-9.5-1.5.12-SNAPSHOT</version>
6666
</dependency>
6767

6868
<!-- Additional dependencies to use bundled full version of MKL -->
6969
<dependency>
7070
<groupId>org.bytedeco</groupId>
7171
<artifactId>mkl-platform-redist</artifactId>
72-
<version>2025.0-1.5.11</version>
72+
<version>2025.0-1.5.12-SNAPSHOT</version>
7373
</dependency>
7474
</dependencies>
7575
<build>

pytorch/cppbuild.sh

+3-1
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@ if [[ -z "$PLATFORM" ]]; then
88
fi
99

1010
export BUILD_TEST=0
11+
#export CUDAHOSTCC="clang"
12+
#export CUDAHOSTCXX="clang++"
1113
export CUDACXX="/usr/local/cuda/bin/nvcc"
1214
export CUDA_HOME="/usr/local/cuda"
1315
export CUDNN_HOME="/usr/local/cuda"
@@ -38,7 +40,7 @@ if [[ $PLATFORM == windows* ]]; then
3840
export PYTHON_BIN_PATH=$(which python.exe)
3941
fi
4042

41-
PYTORCH_VERSION=2.5.1
43+
PYTORCH_VERSION=2.6.0
4244

4345
export PYTORCH_BUILD_VERSION="$PYTORCH_VERSION"
4446
export PYTORCH_BUILD_NUMBER=1

pytorch/platform/gpu/pom.xml

+1-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212

1313
<groupId>org.bytedeco</groupId>
1414
<artifactId>pytorch-platform-gpu</artifactId>
15-
<version>2.5.1-${project.parent.version}</version>
15+
<version>2.6.0-${project.parent.version}</version>
1616
<name>JavaCPP Presets Platform GPU for PyTorch</name>
1717

1818
<properties>

pytorch/platform/pom.xml

+1-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212

1313
<groupId>org.bytedeco</groupId>
1414
<artifactId>pytorch-platform</artifactId>
15-
<version>2.5.1-${project.parent.version}</version>
15+
<version>2.6.0-${project.parent.version}</version>
1616
<name>JavaCPP Presets Platform for PyTorch</name>
1717

1818
<properties>

pytorch/pom.xml

+1-1
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111

1212
<groupId>org.bytedeco</groupId>
1313
<artifactId>pytorch</artifactId>
14-
<version>2.5.1-${project.parent.version}</version>
14+
<version>2.6.0-${project.parent.version}</version>
1515
<name>JavaCPP Presets for PyTorch</name>
1616

1717
<dependencies>

pytorch/samples/pom.xml

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,14 @@
1212
<dependency>
1313
<groupId>org.bytedeco</groupId>
1414
<artifactId>pytorch-platform</artifactId>
15-
<version>2.5.1-1.5.12-SNAPSHOT</version>
15+
<version>2.6.0-1.5.12-SNAPSHOT</version>
1616
</dependency>
1717

1818
<!-- Additional dependencies required to use CUDA, cuDNN, and NCCL -->
1919
<dependency>
2020
<groupId>org.bytedeco</groupId>
2121
<artifactId>pytorch-platform-gpu</artifactId>
22-
<version>2.5.1-1.5.12-SNAPSHOT</version>
22+
<version>2.6.0-1.5.12-SNAPSHOT</version>
2323
</dependency>
2424

2525
<!-- Additional dependencies to use bundled CUDA, cuDNN, and NCCL -->

pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunner.java

+9-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

@@ -32,11 +32,18 @@ public class AOTIModelContainerRunner extends Pointer {
3232

3333

3434
public native @ByVal TensorVector run(
35-
@ByRef TensorVector inputs);
35+
@Const @ByRef TensorVector inputs,
36+
Pointer stream_handle/*=nullptr*/);
37+
public native @ByVal TensorVector run(
38+
@Const @ByRef TensorVector inputs);
3639

3740
public native @ByVal ExtraFilesMap getConstantNamesToOriginalFQNs();
3841
public native @ByVal StringIntMap getConstantNamesToDtypes();
3942
public native void update_inactive_constant_buffer(@Cast("const torch::inductor::TensorConstantMap*") @ByRef SizeTStringMap const_map);
43+
public native void update_constant_buffer(
44+
@ByRef StringTensorUMap tensor_map,
45+
@Cast("bool") boolean use_inactive,
46+
@Cast("bool") boolean validate_full_updates);
4047
public native void update_constant_buffer(
4148
@Cast("const torch::inductor::TensorConstantMap*") @ByRef SizeTStringMap const_map,
4249
@Cast("bool") boolean use_inactive,

pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java

+6-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

@@ -45,5 +45,9 @@ public AOTIModelContainerRunnerCpu(
4545
private native void allocate(
4646
@StdString String model_so_path);
4747

48-
public native @ByVal TensorVector run(@ByRef TensorVector inputs);
48+
public native @ByVal TensorVector run(
49+
@Const @ByRef TensorVector inputs,
50+
Pointer stream_handle/*=nullptr*/);
51+
public native @ByVal TensorVector run(
52+
@Const @ByRef TensorVector inputs);
4953
}

pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

pytorch/src/gen/java/org/bytedeco/pytorch/AcceleratorHooksInterface.java

+13-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

@@ -36,6 +36,8 @@ public class AcceleratorHooksInterface extends Pointer {
3636
// Whether the device at device_index is fully initialized or not.
3737
public native @Cast("bool") boolean hasPrimaryContext(@Cast("c10::DeviceIndex") byte device_index);
3838

39+
public native void init();
40+
3941
public native @Cast("c10::DeviceIndex") byte deviceCount();
4042

4143
public native void setCurrentDevice(@Cast("c10::DeviceIndex") byte device);
@@ -49,4 +51,14 @@ public class AcceleratorHooksInterface extends Pointer {
4951
public native @Cast("bool") boolean isPinnedPtr(@Const Pointer data);
5052

5153
public native Allocator getPinnedMemoryAllocator();
54+
55+
public native @ByVal Device getDeviceFromPtr(Pointer data);
56+
57+
public native @Const @ByRef Generator getDefaultGenerator(
58+
@Cast("c10::DeviceIndex") byte device_index/*=-1*/);
59+
public native @Const @ByRef Generator getDefaultGenerator();
60+
61+
public native @ByVal Generator getNewGenerator(
62+
@Cast("c10::DeviceIndex") byte device_index/*=-1*/);
63+
public native @ByVal Generator getNewGenerator();
5264
}

pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java

+5-5
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

@@ -26,15 +26,15 @@ public class Adagrad extends Optimizer {
2626
public Adagrad(Pointer p) { super(p); }
2727

2828
public Adagrad(
29-
@ByVal OptimizerParamGroupVector param_groups,
29+
@Const @ByRef OptimizerParamGroupVector param_groups,
3030
@ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(param_groups, defaults); }
3131
private native void allocate(
32-
@ByVal OptimizerParamGroupVector param_groups,
32+
@Const @ByRef OptimizerParamGroupVector param_groups,
3333
@ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults);
3434
public Adagrad(
35-
@ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
35+
@Const @ByRef OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
3636
private native void allocate(
37-
@ByVal OptimizerParamGroupVector param_groups);
37+
@Const @ByRef OptimizerParamGroupVector param_groups);
3838

3939
public Adagrad(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(params, defaults); }
4040
private native void allocate(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults);

pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

@@ -17,7 +17,7 @@
1717
import static org.bytedeco.javacpp.global.chrono.*;
1818

1919
import static org.bytedeco.pytorch.global.torch.*;
20-
// namespace torch
20+
// namespace torch::serialize
2121

2222
@Namespace("torch::optim") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
2323
public class AdagradOptions extends OptimizerCloneableAdagradOptions {

pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java

+5-5
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

@@ -26,15 +26,15 @@ public class Adam extends Optimizer {
2626
public Adam(Pointer p) { super(p); }
2727

2828
public Adam(
29-
@ByVal OptimizerParamGroupVector param_groups,
29+
@Const @ByRef OptimizerParamGroupVector param_groups,
3030
@ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults) { super((Pointer)null); allocate(param_groups, defaults); }
3131
private native void allocate(
32-
@ByVal OptimizerParamGroupVector param_groups,
32+
@Const @ByRef OptimizerParamGroupVector param_groups,
3333
@ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults);
3434
public Adam(
35-
@ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
35+
@Const @ByRef OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
3636
private native void allocate(
37-
@ByVal OptimizerParamGroupVector param_groups);
37+
@Const @ByRef OptimizerParamGroupVector param_groups);
3838
public Adam(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults) { super((Pointer)null); allocate(params, defaults); }
3939
private native void allocate(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults);
4040
public Adam(@ByVal TensorVector params) { super((Pointer)null); allocate(params); }

pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

@@ -17,7 +17,7 @@
1717
import static org.bytedeco.javacpp.global.chrono.*;
1818

1919
import static org.bytedeco.pytorch.global.torch.*;
20-
// namespace torch
20+
// namespace torch::serialize
2121

2222
@Namespace("torch::optim") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
2323
public class AdamOptions extends OptimizerCloneableAdamOptions {

pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java

+5-5
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

@@ -26,15 +26,15 @@ public class AdamW extends Optimizer {
2626
public AdamW(Pointer p) { super(p); }
2727

2828
public AdamW(
29-
@ByVal OptimizerParamGroupVector param_groups,
29+
@Const @ByRef OptimizerParamGroupVector param_groups,
3030
@ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults) { super((Pointer)null); allocate(param_groups, defaults); }
3131
private native void allocate(
32-
@ByVal OptimizerParamGroupVector param_groups,
32+
@Const @ByRef OptimizerParamGroupVector param_groups,
3333
@ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults);
3434
public AdamW(
35-
@ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
35+
@Const @ByRef OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
3636
private native void allocate(
37-
@ByVal OptimizerParamGroupVector param_groups);
37+
@Const @ByRef OptimizerParamGroupVector param_groups);
3838
public AdamW(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults) { super((Pointer)null); allocate(params, defaults); }
3939
private native void allocate(@ByVal TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults);
4040
public AdamW(@ByVal TensorVector params) { super((Pointer)null); allocate(params); }

pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

@@ -17,7 +17,7 @@
1717
import static org.bytedeco.javacpp.global.chrono.*;
1818

1919
import static org.bytedeco.pytorch.global.torch.*;
20-
// namespace torch
20+
// namespace torch::serialize
2121

2222
@Namespace("torch::optim") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
2323
public class AdamWOptions extends OptimizerCloneableAdamWOptions {

pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// Targeted by JavaCPP version 1.5.11: DO NOT EDIT THIS FILE
1+
// Targeted by JavaCPP version 1.5.12-SNAPSHOT: DO NOT EDIT THIS FILE
22

33
package org.bytedeco.pytorch;
44

0 commit comments

Comments (0)