@@ -280,7 +280,7 @@ void DmlToCpuMemCpy(void* dst, const void* src, size_t num_bytes) {
280280 uint32_t readback_heap_size = gsl::narrow_cast<uint32_t >(sizeof (readback_heap));
281281 ORT_THROW_IF_FAILED (d3d12_device->GetPrivateData (dml_readback_heap_guid, &readback_heap_size, &readback_heap));
282282
283- // ReadbackFromGpu already syncs with the CPU and waits for the copy to be completed, so we don't need to sync after
283+ // ReadbackFromGpu already syncs with the CPU and waits for the copy to be completed, so we don't need to sync after
284284 // this call
285285 readback_heap->ReadbackFromGpu (
286286 gsl::make_span (static_cast <std::byte*>(dst), num_bytes),
@@ -428,7 +428,7 @@ MLDataType NumpyTypeToOnnxRuntimeTensorType(int numpy_type) {
428428 // Special, not a C type expands to enum value of 16
429429 {NPY_FLOAT16, DataTypeImpl::GetType<MLFloat16>()},
430430 {NPY_DOUBLE, DataTypeImpl::GetType<double >()},
431- // We don't want to use size specific types such
431+ // We don't want to use size specific types such
432432 // as NPY_INT32 bc they are not enums but hash defines
433433 // which may map into other enums and may conflict with other entries here
434434 // also NPY docs define these sizes as platform specific, thus we
@@ -581,6 +581,7 @@ static void CopyDataToTensor(PyArrayObject* darray, int npy_type, Tensor& tensor
581581 for (int i = 0 ; i < total_items; ++i, src += item_size) {
582582 // Python unicode strings are assumed to be USC-4. Strings are stored as UTF-8.
583583 PyObject* item = PyArray_GETITEM (darray, src);
584+ UniqueDecRefPtr<PyObject> itemGuard (item, DecRefFn<PyObject>());
584585 PyObject* pStr = PyObject_Str (item);
585586 UniqueDecRefPtr<PyObject> strGuard (pStr, DecRefFn<PyObject>());
586587 dst[i] = py::reinterpret_borrow<py::str>(pStr);
0 commit comments