Description: normalize out unicode ligatures
Author: Yadd <yadd@debian.org>
Origin: upstream, https://github.com/isaacs/node-tar/commit/3b1abfae
Bug: https://github.com/isaacs/node-tar/security/advisories/GHSA-r6q2-hw4h-h46w
Forwarded: not-needed
Applied-Upstream: 7.5.4, commit:3b1abfae
Reviewed-By: Xavier Guimard <yadd@debian.org>
Last-Update: 2026-01-22

--- a/lib/normalize-unicode.js
+++ b/lib/normalize-unicode.js
@@ -6,7 +6,11 @@
 const { hasOwnProperty } = Object.prototype
 module.exports = s => {
   if (!hasOwnProperty.call(normalizeCache, s)) {
-    normalizeCache[s] = s.normalize('NFD')
+    // shake out identical accents and ligatures
+    normalizeCache[s] = s
+      .normalize('NFD')
+      .toLocaleLowerCase('en')
+      .toLocaleUpperCase('en')
   }
   return normalizeCache[s]
 }
--- a/lib/path-reservations.js
+++ b/lib/path-reservations.js
@@ -123,7 +123,7 @@
     // effectively removing all parallelization on windows.
     paths = isWindows ? ['win32 parallelization disabled'] : paths.map(p => {
       // don't need normPath, because we skip this entirely for windows
-      return stripSlashes(join(normalize(p))).toLowerCase()
+      return stripSlashes(join(normalize(p)))
     })
 
     const dirs = new Set(
--- a/tap-snapshots/test/normalize-unicode.js.test.cjs
+++ b/tap-snapshots/test/normalize-unicode.js.test.cjs
@@ -6,25 +6,25 @@
 */
 'use strict'
 exports[`test/normalize-unicode.js TAP normalize with strip slashes "1/4foo.txt" > normalized 1`] = `
-1/4foo.txt
+1/4FOO.TXT
 `
 
 exports[`test/normalize-unicode.js TAP normalize with strip slashes "\\\\a\\\\b\\\\c\\\\d\\\\" > normalized 1`] = `
-/a/b/c/d
+/A/B/C/D
 `
 
 exports[`test/normalize-unicode.js TAP normalize with strip slashes "¼foo.txt" > normalized 1`] = `
-¼foo.txt
+¼FOO.TXT
 `
 
 exports[`test/normalize-unicode.js TAP normalize with strip slashes "﹨aaaa﹨dddd﹨" > normalized 1`] = `
-﹨aaaa﹨dddd﹨
+﹨AAAA﹨DDDD﹨
 `
 
 exports[`test/normalize-unicode.js TAP normalize with strip slashes "\bbb\eee\" > normalized 1`] = `
-\bbb\eee\
+\BBB\EEE\
 `
 
 exports[`test/normalize-unicode.js TAP normalize with strip slashes "\\\\\eee\\\\\\" > normalized 1`] = `
-\\\\\eee\\\\\\
+\\\\\EEE\\\\\\
 `
--- /dev/null
+++ b/test/ghsa-r6q2-hw4h-h46w.js
@@ -0,0 +1,49 @@
+const t = require('tap')
+const normalizeUnicode = require('../lib/normalize-unicode.js')
+const Header = require('../lib/header.js')
+const { resolve } = require('path')
+const { lstatSync, readFileSync, statSync } = require('fs')
+const extract = require('../lib/extract.js')
+
+// these characters are problems on macOS's APFS
+const chars = {
+  ['ﬀ'.normalize('NFC')]: 'FF',
+  ['ﬁ'.normalize('NFC')]: 'FI',
+  ['ﬂ'.normalize('NFC')]: 'FL',
+  ['ﬃ'.normalize('NFC')]: 'FFI',
+  ['ﬄ'.normalize('NFC')]: 'FFL',
+  ['ﬅ'.normalize('NFC')]: 'ST',
+  ['ﬆ'.normalize('NFC')]: 'ST',
+  ['ẛ'.normalize('NFC')]: 'Ṡ',
+  ['ß'.normalize('NFC')]: 'SS',
+  ['ẞ'.normalize('NFC')]: 'SS',
+  ['ſ'.normalize('NFC')]: 'S',
+}
+
+for (const [c, n] of Object.entries(chars)) {
+  t.test(`${c} => ${n}`, async t => {
+    t.equal(normalizeUnicode(c), n)
+
+    t.test('link then file', async t => {
+      const tarball = Buffer.alloc(2048)
+      new Header({
+        path: c,
+        type: 'SymbolicLink',
+        linkpath: './target',
+      }).encode(tarball, 0)
+      new Header({
+        path: n,
+        type: 'File',
+        size: 1,
+      }).encode(tarball, 512)
+      tarball[1024] = 'x'.charCodeAt(0)
+
+      const cwd = t.testdir({ tarball })
+
+      await extract({ cwd, file: resolve(cwd, 'tarball') })
+
+      t.throws(() => statSync(resolve(cwd, 'target')))
+      t.equal(readFileSync(resolve(cwd, n), 'utf8'), 'x')
+    })
+  })
+}
--- a/test/normalize-unicode.js
+++ b/test/normalize-unicode.js
@@ -12,7 +12,7 @@
 
 t.equal(normalize(cafe1), normalize(cafe2), 'matching unicodes')
 t.equal(normalize(cafe1), normalize(cafe2), 'cached')
-t.equal(normalize('foo'), 'foo', 'non-unicode string')
+t.equal(normalize('foo'), 'FOO', 'non-unicode string')
 
 t.test('normalize with strip slashes', t => {
   const paths = [