diff --git a/processing/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java b/processing/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java
index 1d1bed181dff..2c8d8bec13d4 100644
--- a/processing/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java
+++ b/processing/src/main/java/org/apache/druid/java/util/common/io/smoosh/FileSmoosher.java
@@ -23,6 +23,7 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.primitives.Ints;
+import org.apache.druid.error.DruidException;
 import org.apache.druid.java.util.common.FileUtils;
 import org.apache.druid.java.util.common.IAE;
 import org.apache.druid.java.util.common.IOE;
@@ -158,7 +159,13 @@ public SegmentFileChannel addWithChannel(final String name, final long size) thr
   public SmooshedWriter addWithSmooshedWriter(final String name, final long size) throws IOException
   {
     if (size > maxChunkSize) {
-      throw new IAE("Asked to add buffers[%,d] larger than configured max[%,d]", size, maxChunkSize);
+      throw DruidException.forPersona(DruidException.Persona.ADMIN)
+                          .ofCategory(DruidException.Category.RUNTIME_FAILURE)
+                          .build("Serialized buffer size[%,d] for column[%s] exceeds the maximum[%,d]. " +
+                                 "Consider adjusting the tuningConfig - for example, reduce maxRowsPerSegment, " +
+                                 "or partition your data further.",
+                              size, name, maxChunkSize
+                          );
     }
 
     // If current writer is in use then create a new SmooshedWriter which
diff --git a/processing/src/test/java/org/apache/druid/java/util/common/io/smoosh/SmooshedFileMapperTest.java b/processing/src/test/java/org/apache/druid/java/util/common/io/smoosh/SmooshedFileMapperTest.java
index 11660f6d3b54..3d8c01d1af92 100644
--- a/processing/src/test/java/org/apache/druid/java/util/common/io/smoosh/SmooshedFileMapperTest.java
+++ b/processing/src/test/java/org/apache/druid/java/util/common/io/smoosh/SmooshedFileMapperTest.java
@@ -22,10 +22,13 @@
 import com.google.common.io.Files;
 import com.google.common.primitives.Ints;
 import junit.framework.Assert;
+import org.apache.druid.error.DruidException;
+import org.apache.druid.error.DruidExceptionMatcher;
 import org.apache.druid.java.util.common.BufferUtils;
 import org.apache.druid.java.util.common.ISE;
 import org.apache.druid.java.util.common.StringUtils;
 import org.apache.druid.segment.file.SegmentFileChannel;
+import org.hamcrest.MatcherAssert;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
@@ -80,6 +83,22 @@ public void testWhenFirstWriterClosedInTheMiddle() throws Exception
     validateOutput(baseDir);
   }
 
+  @Test
+  public void testColumnSerializedSizeExceedsMaximum() throws Exception
+  {
+    File baseDir = folder.newFolder("base");
+    try (FileSmoosher smoosher = new FileSmoosher(baseDir, 5)) {
+      MatcherAssert.assertThat(
+          org.junit.Assert.assertThrows(
+              DruidException.class,
+              () -> smoosher.addWithChannel("foo", 10)
+          ),
+          new DruidExceptionMatcher(DruidException.Persona.ADMIN, DruidException.Category.RUNTIME_FAILURE, "general")
+              .expectMessageContains("Serialized buffer size[10] for column[foo] exceeds the maximum[5].")
+      );
+    }
+  }
+
   @Test(expected = ISE.class)
   public void testExceptionForUnClosedFiles() throws Exception
   {