@@ -430,14 +430,18 @@ private void updateHighWatermarkMetadata(LogOffsetMetadata newHighWatermark) {
430430 synchronized (lock ) {
431431 if (newHighWatermark .getMessageOffset () < highWatermarkMetadata .getMessageOffset ()) {
432432 LOG .warn (
433- "Non-monotonic update of high watermark from {} to {}" ,
433+ "Non-monotonic update of high watermark from {} to {} for bucket {}" ,
434434 highWatermarkMetadata ,
435- newHighWatermark );
435+ newHighWatermark ,
436+ localLog .getTableBucket ());
436437 }
437438 highWatermarkMetadata = newHighWatermark ;
438439 // TODO log offset listener to update log offset.
439440 }
440- LOG .trace ("Setting high watermark {}" , newHighWatermark );
441+ LOG .trace (
442+ "Setting high watermark {} for bucket {}" ,
443+ newHighWatermark ,
444+ localLog .getTableBucket ());
441445 }
442446
443447 /**
@@ -567,17 +571,19 @@ private void deleteSegments(long cleanUpToOffset) {
567571 long localLogStartOffset = localLog .getLocalLogStartOffset ();
568572 if (cleanUpToOffset < localLogStartOffset ) {
569573 LOG .debug (
570- "Ignore the delete segments action while the input cleanUpToOffset {} "
574+ "Ignore the delete segments action for bucket {} while the input cleanUpToOffset {} "
571575 + "is smaller than the current localLogStartOffset {}" ,
576+ getTableBucket (),
572577 cleanUpToOffset ,
573578 localLogStartOffset );
574579 return ;
575580 }
576581
577582 if (cleanUpToOffset > getHighWatermark ()) {
578583 LOG .warn (
579- "Ignore the delete segments action while the input cleanUpToOffset {} "
584+ "Ignore the delete segments action for bucket {} while the input cleanUpToOffset {} "
580585 + "is larger than the current highWatermark {}" ,
586+ getTableBucket (),
581587 cleanUpToOffset ,
582588 getHighWatermark ());
583589 return ;
@@ -716,11 +722,13 @@ private LogAppendInfo append(MemoryLogRecords records, boolean appendAsLeader)
716722 // todo update the first unstable offset (which is used to compute lso)
717723
718724 LOG .trace (
719- "Appended message set with last offset: {}, first offset {}, next offset: {} and messages {}" ,
725+ "Appended message set with last offset: {}, first offset {}, next offset: {} "
726+ + "and messages {} for bucket {}" ,
720727 appendInfo .lastOffset (),
721728 appendInfo .firstOffset (),
722729 localLog .getLocalLogEndOffset (),
723- validRecords );
730+ validRecords ,
731+ getTableBucket ());
724732
725733 if (localLog .unflushedMessages () >= logFlushIntervalMessages ) {
726734 flush (false );
@@ -787,11 +795,12 @@ private void flush(long offset, boolean includingOffset) throws IOException {
787795 if (flushOffset > localLog .getRecoveryPoint ()) {
788796 if (LOG .isDebugEnabled ()) {
789797 LOG .debug (
790- "Flushing log up to offset {} ({}) with recovery point {}, unflushed: {}" ,
798+ "Flushing log up to offset {} ({}) with recovery point {}, unflushed: {}, for bucket {}" ,
791799 offset ,
792800 includingOffsetStr ,
793801 flushOffset ,
794- localLog .unflushedMessages ());
802+ localLog .unflushedMessages (),
803+ getTableBucket ());
795804 }
796805
797806 localLog .flush (flushOffset );
@@ -810,7 +819,9 @@ private void maybeRoll(int messageSize, LogAppendInfo appendInfo) throws Excepti
810819 new RollParams (maxSegmentFileSize , appendInfo .lastOffset (), messageSize ))) {
811820 if (LOG .isDebugEnabled ()) {
812821 LOG .debug (
813- "Rolling new log segment (log_size = {}/{}), offset_index_size = {}/{}, time_index_size = {}/{}" ,
822+ "Rolling new log segment for bucket {} (log_size = {}/{}), offset_index_size = {}/{}, "
823+ + "time_index_size = {}/{}" ,
824+ getTableBucket (),
814825 segment .getSizeInBytes (),
815826 maxSegmentFileSize ,
816827 segment .offsetIndex ().entries (),
@@ -863,12 +874,13 @@ boolean truncateTo(long targetOffset) throws LogStorageException {
863874
864875 if (targetOffset >= localLog .getLocalLogEndOffset ()) {
865876 LOG .info (
866- "Truncate to {} has no effect as the largest offset in the log is {}." ,
877+ "Truncate to {} for bucket {} has no effect as the largest offset in the log is {}." ,
867878 targetOffset ,
879+ getTableBucket (),
868880 localLog .getLocalLogEndOffset () - 1 );
869881 return false ;
870882 } else {
871- LOG .info ("Truncating to offset {}" , targetOffset );
883+ LOG .info ("Truncating to offset {} for bucket {}" , targetOffset , getTableBucket () );
872884 synchronized (lock ) {
873885 try {
874886 localLog .checkIfMemoryMappedBufferClosed ();
@@ -902,7 +914,7 @@ boolean truncateTo(long targetOffset) throws LogStorageException {
902914
903915 /** Delete all data in the log and start at the new offset. */
904916 void truncateFullyAndStartAt (long newOffset ) throws LogStorageException {
905- LOG .debug ("Truncate and start at offset {}" , newOffset );
917+ LOG .debug ("Truncate and start at offset {} for bucket {}" , newOffset , getTableBucket () );
906918 synchronized (lock ) {
907919 try {
908920 localLog .truncateFullyAndStartAt (newOffset );
@@ -950,14 +962,14 @@ public List<LogSegment> logSegments() {
950962 }
951963
952964 public void close () {
953- LOG .debug ("close log tablet" );
965+ LOG .debug ("close log tablet for bucket {}" , getTableBucket () );
954966 synchronized (lock ) {
955967 localLog .checkIfMemoryMappedBufferClosed ();
956968 writerExpireCheck .cancel (true );
957969 try {
958970 writerStateManager .takeSnapshot ();
959971 } catch (IOException e ) {
960- LOG .error ("Error while taking writer snapshot." , e );
972+ LOG .error ("Error while taking writer snapshot for bucket {}." , getTableBucket () , e );
961973 }
962974 localLog .close ();
963975 }
0 commit comments