
Commit ece225c

Fix first line indent between pages (#1971)
2 parents: 1c261ef + 69d97a3

2 files changed: +88 additions, -5 deletions


novelwriter/core/tokenizer.py

Lines changed: 5 additions & 5 deletions
@@ -198,6 +198,7 @@ def __init__(self, project: NWProject) -> None:
         # Instance Variables
         self._hFormatter = HeadingFormatter(self._project)
         self._noSep = True  # Flag to indicate that we don't want a scene separator
+        self._noIndent = False  # Flag to disable text indent on next paragraph
         self._showDialog = False  # Flag for dialogue highlighting

         # This File
@@ -873,19 +874,18 @@ def tokenizeText(self) -> None:
         pLines: list[T_Token] = []

         tCount = len(tokens)
-        pIndent = True
         for n, cToken in enumerate(tokens):

             if n > 0:
                 pToken = tokens[n-1]  # Look behind
             if n < tCount - 1:
                 nToken = tokens[n+1]  # Look ahead

-            if not self._indentFirst and cToken[0] in self.L_SKIP_INDENT:
+            if cToken[0] in self.L_SKIP_INDENT and not self._indentFirst:
                 # Unless the indentFirst flag is set, we set up the next
                 # paragraph to not be indented if we see a block of a
                 # specific type
-                pIndent = False
+                self._noIndent = True

             if cToken[0] == self.T_EMPTY:
                 # We don't need to keep the empty lines after this pass
@@ -910,7 +910,7 @@ def tokenizeText(self) -> None:
                     # Next token is not text, so we add the buffer to tokens
                     nLines = len(pLines)
                     cStyle = pLines[0][4]
-                    if self._firstIndent and pIndent and not cStyle & self.M_ALIGNED:
+                    if self._firstIndent and not (self._noIndent or cStyle & self.M_ALIGNED):
                         # If paragraph indentation is enabled, not temporarily
                         # turned off, and the block is not aligned, we add the
                         # text indentation flag
@@ -938,7 +938,7 @@ def tokenizeText(self) -> None:

                     # Reset buffer and make sure text indent is on for next pass
                     pLines = []
-                    pIndent = True
+                    self._noIndent = False

             else:
                 self._tokens.append(cToken)
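The essence of the change: the skip-indent decision now lives on the Tokenizer instance as self._noIndent rather than in the per-call local pIndent, so it survives between calls to tokenizeText(), that is, across the document/page boundaries of a build. Below is a minimal sketch of that idea only; IndentTracker is a hypothetical toy class, not novelWriter code, and its "#"/"%" prefix check merely stands in for the real L_SKIP_INDENT block types.

class IndentTracker:
    """Toy model of the indent decision, not the actual Tokenizer."""

    def __init__(self) -> None:
        self._noIndent = False  # Instance flag, persists between process() calls

    def process(self, blocks: list[str]) -> list[tuple[str, bool]]:
        """Process one page of blocks and return (text, indented) pairs."""
        result = []
        for block in blocks:
            if block.startswith(("#", "%")):  # Stand-in for L_SKIP_INDENT types
                self._noIndent = True         # Suppress indent on the next paragraph
                continue
            result.append((block, not self._noIndent))
            self._noIndent = False            # Later paragraphs are indented again
        return result


tracker = IndentTracker()
tracker.process(["### Scene Two", "%Synopsis: Stuff happens."])
print(tracker.process(["First paragraph.", "Second paragraph."]))
# [('First paragraph.', False), ('Second paragraph.', True)]

Had the flag been a local reset at the top of process(), the second call would have indented both paragraphs, which mirrors the cross-page bug this commit addresses.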

tests/test_core/test_core_tokenizer.py

Lines changed: 83 additions & 0 deletions
@@ -1324,6 +1324,89 @@ def testCoreToken_SpecialFormat(mockGUI):
     ]


+@pytest.mark.core
+def testCoreToken_TextIndent(mockGUI):
+    """Test the handling of text indent in the Tokenizer class."""
+    project = NWProject()
+    tokens = BareTokenizer(project)
+
+    # No First Indent
+    tokens.setFirstLineIndent(True, 1.0, False)
+
+    assert tokens._noIndent is False
+    assert tokens._firstIndent is True
+    assert tokens._firstWidth == 1.0
+    assert tokens._indentFirst is False
+
+    # Page One
+    # Two paragraphs in the same scene
+    tokens._text = (
+        "# Title One\n\n"
+        "### Scene One\n\n"
+        "First paragraph.\n\n"
+        "Second paragraph.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_HEAD3, 2, "Scene One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 2, "First paragraph.", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 2, "Second paragraph.", [], Tokenizer.A_IND_T),
+    ]
+    assert tokens._noIndent is False
+
+    # Page Two
+    # New scene with only a synopsis
+    tokens._text = (
+        "### Scene Two\n\n"
+        "%Synopsis: Stuff happens.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_HEAD3, 1, "Scene Two", [], Tokenizer.A_NONE),
+        (Tokenizer.T_SYNOPSIS, 1, "Stuff happens.", [], Tokenizer.A_NONE),
+    ]
+    assert tokens._noIndent is True
+
+    # Page Three
+    # Two paragraphs for the scene on the previous page
+    tokens._text = (
+        "First paragraph.\n\n"
+        "Second paragraph.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_TEXT, 0, "First paragraph.", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 0, "Second paragraph.", [], Tokenizer.A_IND_T),
+    ]
+    assert tokens._noIndent is False
+
+    # First Indent
+    tokens.setFirstLineIndent(True, 1.0, True)
+
+    assert tokens._noIndent is False
+    assert tokens._firstIndent is True
+    assert tokens._firstWidth == 1.0
+    assert tokens._indentFirst is True
+
+    # Page Four
+    # Two paragraphs in the same scene
+    tokens._text = (
+        "# Title One\n\n"
+        "### Scene One\n\n"
+        "First paragraph.\n\n"
+        "Second paragraph.\n\n"
+    )
+    tokens.tokenizeText()
+    assert tokens._tokens == [
+        (Tokenizer.T_HEAD1, 1, "Title One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_HEAD3, 2, "Scene One", [], Tokenizer.A_NONE),
+        (Tokenizer.T_TEXT, 2, "First paragraph.", [], Tokenizer.A_IND_T),
+        (Tokenizer.T_TEXT, 2, "Second paragraph.", [], Tokenizer.A_IND_T),
+    ]
+    assert tokens._noIndent is False
+
+
 @pytest.mark.core
 def testCoreToken_ProcessHeaders(mockGUI):
     """Test the header and page parser of the Tokenizer class."""
