Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 77 additions & 0 deletions .github/workflows/go-test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
name: Go Tests

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main, develop ]

# Least-privilege token: none of the jobs below write to the repository.
permissions:
  contents: read

jobs:
  test:
    runs-on: ubuntu-latest

    strategy:
      matrix:
        go: ['1.21', '1.22', '1.23']

    steps:
      - uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
          cache: true

      - name: Download dependencies
        run: go mod download

      - name: Run tests
        run: go test -v -race -coverprofile=coverage.out ./...

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
        with:
          # codecov-action v4 deprecated the `file` input in favour of `files`.
          files: ./coverage.out
          flags: unittests

  build:
    runs-on: ubuntu-latest
    needs: test

    steps:
      - uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.23'

      - name: Build
        run: go build -v ./...

      - name: Build binary
        run: go build -o tlsx ./cmd/tlsx/

      - name: Upload artifact
        uses: actions/upload-artifact@v4
        with:
          name: tlsx-binary
          path: tlsx

  lint:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.23'

      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v4
        with:
          version: latest
          args: --timeout=5m
918 changes: 533 additions & 385 deletions assets/root-certs.pem

Large diffs are not rendered by default.

67 changes: 67 additions & 0 deletions internal/pdcp/race_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
package pdcp_test

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/projectdiscovery/tlsx/internal/pdcp"
	"github.com/projectdiscovery/tlsx/pkg/tlsx/clients"
	pdcpauth "github.com/projectdiscovery/utils/auth/pdcp"
)

func TestUploadWriterExploit(t *testing.T) {
creds := &pdcpauth.PDCPCredentials{
Server: "http://localhost:8080",
APIKey: "test-key",
}

// Use a longer timeout to allow the race to manifest
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
Comment on lines +16 to +23
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

The deadline can deadlock this test under backpressure.

In internal/pdcp/writer.go, autoCommit() stops consuming u.data when ctx.Done() fires, but this test waits for all sender goroutines before calling writer.Close(). If the 20s deadline expires first—or something on localhost:8080 is slow instead of refusing fast—the callback goroutines can block forever on channel sends and wg.Wait() never returns.

🧪 Suggested follow-up
 import (
 	"context"
 	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
 	"sync"
 	"testing"
-	"time"
 
 	"github.com/projectdiscovery/tlsx/pkg/tlsx/clients"
 	"github.com/projectdiscovery/tlsx/internal/pdcp"
 	pdcpauth "github.com/projectdiscovery/utils/auth/pdcp"
 )
 
 func TestUploadWriterExploit(t *testing.T) {
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		_, _ = io.WriteString(w, `{"id":"0123456789abcdefghij"}`)
+	}))
+	defer server.Close()
+
 	creds := &pdcpauth.PDCPCredentials{
-		Server: "http://localhost:8080",
+		Server: server.URL,
 		APIKey: "test-key",
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

Also applies to: 47-65

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@internal/pdcp/race_test.go` around lines 16 - 23, The test's fixed 20s
context deadline can deadlock because autoCommit() stops reading u.data on
ctx.Done(), while the test waits for sender goroutines to finish before calling
writer.Close(), allowing senders to block forever on channel sends; fix by
avoiding a deadline that races with sender shutdown—either remove the context
timeout and use cancel/close-based shutdown, or ensure the test calls
writer.Close() (or cancel the context) before wg.Wait() so autoCommit() unblocks
u.data sends; locate and update the test setup where ctx is created and where
wg.Wait()/writer.Close() are used, and adjust to cancel/close first (references:
autoCommit, u.data, writer.Close, ctx, wg.Wait in the failing test).


writer, err := pdcp.NewUploadWriterCallback(ctx, creds)
if err != nil {
t.Fatalf("failed to create writer: %v", err)
}

var wg sync.WaitGroup
callback := writer.GetWriterCallback()

// Exploit Layer 1: Hammer the string headers with different lengths to trigger tearing
wg.Add(1)
go func() {
defer wg.Done()
for i := 0; i < 100000; i++ {
// Alternating lengths is key to triggering pointer/length tearing
if i % 2 == 0 {
writer.SetAssetID("short-id")
} else {
writer.SetAssetID("very-long-asset-group-identifier-that-exceeds-small-string-optimization")
}
}
}()

// Exploit Layer 2: High-pressure concurrent writes
for g := 0; g < 20; g++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
for i := 0; i < 5000; i++ {
callback(&clients.Response{
Host: fmt.Sprintf("target-%d-%d.com", id, i),
Port: "443",
})
}
}(g)
}

fmt.Println("Exploit running: Hammering ARM64 memory model...")
wg.Wait()

fmt.Println("Attempting final close (Expect hang or crash here)...")
writer.Close()
fmt.Println("SUCCESS: Writer closed (Vulnerability NOT triggered).")
}
48 changes: 37 additions & 11 deletions internal/pdcp/writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (
"net/http"
"net/url"
"regexp"
"sync"
"sync/atomic"
"time"

Expand Down Expand Up @@ -49,6 +50,7 @@ type UploadWriter struct {
client *retryablehttp.Client
done chan struct{}
data chan *clients.Response
mu sync.RWMutex
assetGroupID string
assetGroupName string
counter atomic.Int32
Expand Down Expand Up @@ -100,17 +102,23 @@ func (u *UploadWriter) SetAssetID(id string) error {
if !xidRegex.MatchString(id) {
gologger.Warning().Msgf("invalid asset id provided (unknown xid format): %s", id)
}
u.mu.Lock()
defer u.mu.Unlock()
u.assetGroupID = id
return nil
}

// SetAssetGroupName sets the asset group name used for the upload writer.
// It is safe for concurrent use.
func (u *UploadWriter) SetAssetGroupName(name string) {
	u.mu.Lock()
	u.assetGroupName = name
	u.mu.Unlock()
}

// SetTeamID sets the team id for the upload writer.
// It is safe for concurrent use.
func (u *UploadWriter) SetTeamID(id string) {
	u.mu.Lock()
	u.TeamID = id
	u.mu.Unlock()
}

Expand All @@ -120,10 +128,14 @@ func (u *UploadWriter) autoCommit(ctx context.Context) {
u.done <- struct{}{}
close(u.done)
// if no scanid is generated no results were uploaded
if u.assetGroupID == "" {
u.mu.RLock()
assetGroupID := u.assetGroupID
teamID := u.TeamID
u.mu.RUnlock()
if assetGroupID == "" {
gologger.Verbose().Msgf("UI dashboard setup skipped, no results found to upload")
} else {
gologger.Info().Msgf("Found %v results, View found results in dashboard : %v", u.counter.Load(), getAssetsDashBoardURL(u.assetGroupID, u.TeamID))
gologger.Info().Msgf("Found %v results, View found results in dashboard : %v", u.counter.Load(), getAssetsDashBoardURL(assetGroupID, teamID))
}
}()
// temporary buffer to store the results
Expand Down Expand Up @@ -185,7 +197,11 @@ func (u *UploadWriter) uploadChunk(buff *bytes.Buffer) error {
// if successful, reset the buffer
buff.Reset()
// log in verbose mode
gologger.Warning().Msgf("Uploaded results chunk, you can view assets at %v", getAssetsDashBoardURL(u.assetGroupID, u.TeamID))
u.mu.RLock()
assetGroupID := u.assetGroupID
teamID := u.TeamID
u.mu.RUnlock()
gologger.Warning().Msgf("Uploaded results chunk, you can view assets at %v", getAssetsDashBoardURL(assetGroupID, teamID))
return nil
}

Expand All @@ -212,8 +228,12 @@ func (u *UploadWriter) upload(data []byte) error {
if err := json.Unmarshal(bin, &uploadResp); err != nil {
return errkit.Wrapf(err, "could not unmarshal response got %v", string(bin))
}
if uploadResp.ID != "" && u.assetGroupID == "" {
u.assetGroupID = uploadResp.ID
if uploadResp.ID != "" {
u.mu.Lock()
if u.assetGroupID == "" {
u.assetGroupID = uploadResp.ID
}
u.mu.Unlock()
}
return nil
}
Expand All @@ -224,12 +244,18 @@ func (u *UploadWriter) upload(data []byte) error {
func (u *UploadWriter) getRequest(bin []byte) (*retryablehttp.Request, error) {
var method, url string

if u.assetGroupID == "" {
u.mu.RLock()
assetID := u.assetGroupID
assetName := u.assetGroupName
teamID := u.TeamID
u.mu.RUnlock()

if assetID == "" {
u.uploadURL.Path = uploadEndpoint
method = http.MethodPost
url = u.uploadURL.String()
} else {
u.uploadURL.Path = fmt.Sprintf(appendEndpoint, u.assetGroupID)
u.uploadURL.Path = fmt.Sprintf(appendEndpoint, assetID)
method = http.MethodPatch
url = u.uploadURL.String()
}
Expand All @@ -240,14 +266,14 @@ func (u *UploadWriter) getRequest(bin []byte) (*retryablehttp.Request, error) {
// add pdtm meta params - version will be set by updateutils
req.Params.Merge(updateutils.GetpdtmParams("tlsx"))
// if it is upload endpoint also include name if it exists
if u.assetGroupName != "" && req.Path == uploadEndpoint {
req.Params.Add("name", u.assetGroupName)
if assetName != "" && req.Path == uploadEndpoint {
req.Params.Add("name", assetName)
}
req.Update()

req.Header.Set(pdcpauth.ApiKeyHeaderName, u.creds.APIKey)
if u.TeamID != "" {
req.Header.Set(teamIDHeader, u.TeamID)
if teamID != "" {
req.Header.Set(teamIDHeader, teamID)
}
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Accept", "application/json")
Expand Down
16 changes: 14 additions & 2 deletions internal/runner/runner.go
Original file line number Diff line number Diff line change
Expand Up @@ -440,7 +440,13 @@ func (r *Runner) normalizeAndQueueInputs(inputs chan taskInput) error {
for scanner.Scan() {
text := scanner.Text()
if text != "" {
r.processInputItem(text, inputs)
// Split by comma like -u flag does
for _, item := range strings.Split(text, ",") {
item = strings.TrimSpace(item)
if item != "" {
r.processInputItem(item, inputs)
}
}
}
}
}
Expand All @@ -449,7 +455,13 @@ func (r *Runner) normalizeAndQueueInputs(inputs chan taskInput) error {
for scanner.Scan() {
text := scanner.Text()
if text != "" {
r.processInputItem(text, inputs)
// Split by comma like -u flag does
for _, item := range strings.Split(text, ",") {
item = strings.TrimSpace(item)
if item != "" {
r.processInputItem(item, inputs)
}
}
}
}
}
Expand Down
30 changes: 28 additions & 2 deletions pkg/output/file_writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,15 @@ package output
import (
"bufio"
"os"
"sync"
)

// fileWriter is a concurrent, buffered, file-based output writer.
type fileWriter struct {
	file     *os.File
	writer   *bufio.Writer
	mu       sync.Mutex // guards writer and flushCnt for thread-safe writes/flushes
	flushCnt int        // records written since the last periodic flush
}

// NewFileOutputWriter creates a new buffered writer for a file
Expand All @@ -22,16 +25,39 @@ func newFileOutputWriter(file string) (*fileWriter, error) {

// Write appends data to the underlying file as a single newline-terminated
// record. It is safe for concurrent use.
func (w *fileWriter) Write(data []byte) error {
	w.mu.Lock()
	defer w.mu.Unlock()

	if _, err := w.writer.Write(data); err != nil {
		return err
	}
	// Check the newline write before deciding whether to flush, so a write
	// failure is never masked by a successful flush.
	if err := w.writer.WriteByte('\n'); err != nil {
		return err
	}

	// Periodic flush every 100 records to prevent buffer buildup on large scans.
	w.flushCnt++
	if w.flushCnt >= 100 {
		w.flushCnt = 0
		return w.writer.Flush()
	}
	return nil
}

// Flush forces any buffered output down to the underlying file.
// It is safe for concurrent use.
func (w *fileWriter) Flush() error {
	w.mu.Lock()
	err := w.writer.Flush()
	w.mu.Unlock()
	return err
}

// Close closes the underlying writer flushing everything to disk
func (w *fileWriter) Close() error {
w.mu.Lock()
defer w.mu.Unlock()

if err := w.writer.Flush(); err != nil {
return err
}
Expand Down
Loading