Use new call limiter in network state tables
Potentially fixes #1043, #1294
dhaavi committed Sep 28, 2023
1 parent 6fc7c8c commit 577299c
Showing 5 changed files with 83 additions and 212 deletions.
4 changes: 2 additions & 2 deletions network/state/exists.go
@@ -37,7 +37,7 @@ func Exists(pktInfo *packet.Info, now time.Time) (exists bool) {
 func (table *tcpTable) exists(pktInfo *packet.Info) (exists bool) {
 	// Update tables if older than the connection that is checked.
 	if table.lastUpdateAt.Load() < pktInfo.SeenAt.UnixNano() {
-		table.updateTables(table.updateIter.Load())
+		table.updateTables()
 	}
 
 	table.lock.RLock()
@@ -64,7 +64,7 @@ func (table *tcpTable) exists(pktInfo *packet.Info) (exists bool) {
 func (table *udpTable) exists(pktInfo *packet.Info, now time.Time) (exists bool) {
 	// Update tables if older than the connection that is checked.
 	if table.lastUpdateAt.Load() < pktInfo.SeenAt.UnixNano() {
-		table.updateTables(table.updateIter.Load())
+		table.updateTables()
 	}
 
 	table.lock.RLock()
8 changes: 4 additions & 4 deletions network/state/info.go
@@ -25,12 +25,12 @@ type Info struct {
 func GetInfo() *Info {
 	info := &Info{}
 
-	info.TCP4Connections, info.TCP4Listeners, _ = tcp4Table.updateTables(tcp4Table.updateIter.Load())
-	info.UDP4Binds, _ = udp4Table.updateTables(udp4Table.updateIter.Load())
+	info.TCP4Connections, info.TCP4Listeners = tcp4Table.updateTables()
+	info.UDP4Binds = udp4Table.updateTables()
 
 	if netenv.IPv6Enabled() {
-		info.TCP6Connections, info.TCP6Listeners, _ = tcp6Table.updateTables(tcp6Table.updateIter.Load())
-		info.UDP6Binds, _ = udp6Table.updateTables(udp6Table.updateIter.Load())
+		info.TCP6Connections, info.TCP6Listeners = tcp6Table.updateTables()
+		info.UDP6Binds = udp6Table.updateTables()
 	}
 
 	info.UpdateMeta()
33 changes: 14 additions & 19 deletions network/state/lookup.go
@@ -66,20 +66,18 @@ func (table *tcpTable) lookup(pktInfo *packet.Info, fast bool) (
 	var (
 		connections []*socket.ConnectionInfo
 		listeners   []*socket.BindInfo
-		updateIter  uint64
 
 		dualStackConnections []*socket.ConnectionInfo
 		dualStackListeners   []*socket.BindInfo
-		dualStackUpdateIter  uint64
 	)
 
 	// Search for the socket until found.
 	for i := 1; i <= lookupTries; i++ {
-		// Get or update tables.
+		// Use existing tables for first check if packet was seen after last table update.
 		if i == 1 && pktInfo.SeenAt.UnixNano() >= table.lastUpdateAt.Load() {
-			connections, listeners, updateIter = table.getCurrentTables()
+			connections, listeners = table.getCurrentTables()
 		} else {
-			connections, listeners, updateIter = table.updateTables(updateIter)
+			connections, listeners = table.updateTables()
 		}
 
 		// Check tables for socket.
@@ -97,11 +95,11 @@ func (table *tcpTable) lookup(pktInfo *packet.Info, fast bool) (
 			continue
 		}
 
-		// Get or update tables.
-		if i == 0 {
-			dualStackConnections, dualStackListeners, dualStackUpdateIter = table.dualStack.getCurrentTables()
+		// Use existing tables for first check if packet was seen after last table update.
+		if i == 1 && pktInfo.SeenAt.UnixNano() >= table.dualStack.lastUpdateAt.Load() {
+			dualStackConnections, dualStackListeners = table.dualStack.getCurrentTables()
 		} else {
-			dualStackConnections, dualStackListeners, dualStackUpdateIter = table.dualStack.updateTables(dualStackUpdateIter)
+			dualStackConnections, dualStackListeners = table.dualStack.updateTables()
 		}
 
 		// Check tables for socket.
@@ -169,20 +167,17 @@ func (table *udpTable) lookup(pktInfo *packet.Info, fast bool) (
 
 	// Prepare variables.
 	var (
-		binds      []*socket.BindInfo
-		updateIter uint64
-
-		dualStackBinds      []*socket.BindInfo
-		dualStackUpdateIter uint64
+		binds          []*socket.BindInfo
+		dualStackBinds []*socket.BindInfo
 	)
 
 	// Search for the socket until found.
 	for i := 1; i <= lookupTries; i++ {
 		// Get or update tables.
 		if i == 1 && pktInfo.SeenAt.UnixNano() >= table.lastUpdateAt.Load() {
-			binds, updateIter = table.getCurrentTables()
+			binds = table.getCurrentTables()
 		} else {
-			binds, updateIter = table.updateTables(updateIter)
+			binds = table.updateTables()
 		}
 
 		// Check tables for socket.
@@ -212,10 +207,10 @@ func (table *udpTable) lookup(pktInfo *packet.Info, fast bool) (
 		}
 
 		// Get or update tables.
-		if i == 0 {
-			dualStackBinds, dualStackUpdateIter = table.dualStack.getCurrentTables()
+		if i == 1 && pktInfo.SeenAt.UnixNano() >= table.lastUpdateAt.Load() {
+			dualStackBinds = table.dualStack.getCurrentTables()
 		} else {
-			dualStackBinds, dualStackUpdateIter = table.dualStack.updateTables(dualStackUpdateIter)
+			dualStackBinds = table.dualStack.updateTables()
 		}
 
 		// Check tables for socket.
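All of the lookup paths above gate their refresh on the same freshness check: the packet's SeenAt timestamp is compared against the table's lastUpdateAt, which is stored as unix nanoseconds in an atomic.Int64 so it can be read without taking the table lock. A minimal, self-contained sketch of that comparison, using simplified stand-in variables rather than the portmaster types:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	// Mirrors the tables' lastUpdateAt field: the time of the last refresh,
	// stored as unix nanoseconds for lock-free reads.
	var lastUpdateAt atomic.Int64
	lastUpdateAt.Store(time.Now().UnixNano())

	// In the lookup above, the first attempt checks the existing tables when
	// the packet was seen after the last refresh; a miss, or a later attempt,
	// goes through updateTables(), which is now throttled by the call limiter.
	packetSeenAt := time.Now()
	if packetSeenAt.UnixNano() >= lastUpdateAt.Load() {
		fmt.Println("packet was seen after the last table refresh")
	} else {
		fmt.Println("tables were refreshed after the packet was seen")
	}
}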
121 changes: 31 additions & 90 deletions network/state/tcp.go
@@ -7,10 +7,13 @@ import (
 	"time"
 
 	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/utils"
 	"github.com/safing/portmaster/network/socket"
 )
 
-const maxUpdateTries = 100
+const (
+	minDurationBetweenTableUpdates = 10 * time.Millisecond
+)
 
 type tcpTable struct {
 	version int
@@ -19,29 +22,26 @@ type tcpTable struct {
 	listeners   []*socket.BindInfo
 	lock        sync.RWMutex
 
-	updateIter atomic.Uint64
 	// lastUpdateAt stores the time when the tables where last updated as unix nanoseconds.
 	lastUpdateAt atomic.Int64
 
-	fetchingLock       sync.Mutex
-	fetchingInProgress bool
-	fetchingDoneSignal chan struct{}
-	fetchTable         func() (connections []*socket.ConnectionInfo, listeners []*socket.BindInfo, err error)
+	fetchLimiter *utils.CallLimiter
+	fetchTable   func() (connections []*socket.ConnectionInfo, listeners []*socket.BindInfo, err error)
 
 	dualStack *tcpTable
 }
 
 var (
 	tcp6Table = &tcpTable{
-		version:            6,
-		fetchingDoneSignal: make(chan struct{}),
-		fetchTable:         getTCP6Table,
+		version:      6,
+		fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates),
+		fetchTable:   getTCP6Table,
 	}
 
 	tcp4Table = &tcpTable{
-		version:            4,
-		fetchingDoneSignal: make(chan struct{}),
-		fetchTable:         getTCP4Table,
+		version:      4,
+		fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates),
+		fetchTable:   getTCP4Table,
 	}
 )
 
@@ -54,97 +54,38 @@ func EnableTCPDualStack() {
 func (table *tcpTable) getCurrentTables() (
 	connections []*socket.ConnectionInfo,
 	listeners []*socket.BindInfo,
-	updateIter uint64,
 ) {
 	table.lock.RLock()
 	defer table.lock.RUnlock()
 
-	return table.connections, table.listeners, table.updateIter.Load()
-}
-
-func (table *tcpTable) checkFetchingState() (fetch bool, signal chan struct{}) {
-	table.fetchingLock.Lock()
-	defer table.fetchingLock.Unlock()
-
-	// If fetching is already in progress, just return the signal.
-	if table.fetchingInProgress {
-		return false, table.fetchingDoneSignal
-	}
-
-	// Otherwise, tell caller to fetch.
-	table.fetchingInProgress = true
-	return true, nil
+	return table.connections, table.listeners
 }
 
-func (table *tcpTable) signalFetchComplete() {
-	table.fetchingLock.Lock()
-	defer table.fetchingLock.Unlock()
-
-	// Set fetching state.
-	table.fetchingInProgress = false
-
-	// Signal waiting goroutines.
-	close(table.fetchingDoneSignal)
-	table.fetchingDoneSignal = make(chan struct{})
-}
-
-func (table *tcpTable) updateTables(previousUpdateIter uint64) (
+func (table *tcpTable) updateTables() (
 	connections []*socket.ConnectionInfo,
 	listeners []*socket.BindInfo,
-	updateIter uint64,
 ) {
-	var tries int
-
-	// Attempt to update the tables until we get a new version of the tables.
-	for previousUpdateIter == table.updateIter.Load() {
-		// Abort if it takes too long.
-		tries++
-		if tries > maxUpdateTries {
-			log.Warningf("state: failed to upate TCP%d socket table %d times", table.version, tries-1)
-			return table.getCurrentTables()
+	// Fetch tables.
+	table.fetchLimiter.Do(func() {
+		// Fetch new tables from system.
+		connections, listeners, err := table.fetchTable()
+		if err != nil {
+			log.Warningf("state: failed to get TCP%d socket table: %s", table.version, err)
+			return
 		}
 
-		// Check if someone is fetching or if we should fetch.
-		fetch, signal := table.checkFetchingState()
-		if fetch {
-			defer table.signalFetchComplete()
-
-			// Just to be sure, check again if there is a new version.
-			if previousUpdateIter < table.updateIter.Load() {
-				return table.getCurrentTables()
-			}
-
-			// Wait for 5 milliseconds.
-			time.Sleep(5 * time.Millisecond)
-
-			// Fetch new tables from system.
-			connections, listeners, err := table.fetchTable()
-			if err != nil {
-				log.Warningf("state: failed to get TCP%d socket table: %s", table.version, err)
-				// Return the current tables as fallback, as we need to trigger the defer to complete the fetch.
-				return table.getCurrentTables()
-			}
-
-			// Pre-check for any listeners.
-			for _, bindInfo := range listeners {
-				bindInfo.ListensAny = bindInfo.Local.IP.Equal(net.IPv4zero) || bindInfo.Local.IP.Equal(net.IPv6zero)
-			}
-
-			// Apply new tables.
-			table.lock.Lock()
-			defer table.lock.Unlock()
-			table.connections = connections
-			table.listeners = listeners
-			table.updateIter.Add(1)
-			table.lastUpdateAt.Store(time.Now().UnixNano())
-
-			// Return new tables immediately.
-			return table.connections, table.listeners, table.updateIter.Load()
+		// Pre-check for any listeners.
+		for _, bindInfo := range listeners {
+			bindInfo.ListensAny = bindInfo.Local.IP.Equal(net.IPv4zero) || bindInfo.Local.IP.Equal(net.IPv6zero)
 		}
 
-		// Otherwise, wait for fetch to complete.
-		<-signal
-	}
+		// Apply new tables.
+		table.lock.Lock()
+		defer table.lock.Unlock()
+		table.connections = connections
+		table.listeners = listeners
+		table.lastUpdateAt.Store(time.Now().UnixNano())
+	})
 
 	return table.getCurrentTables()
 }
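The manual fetchingLock / fetchingInProgress / fetchingDoneSignal machinery and the update-iteration counter above are replaced by a single utils.CallLimiter, created with a 10 ms minimum interval and driven through fetchLimiter.Do. The behaviour the new updateTables relies on appears to be: callers that overlap in time coalesce onto one fetch of the system table, and successive fetches are spaced at least minDurationBetweenTableUpdates apart. A rough sketch of a limiter with those semantics, for illustration only and not the portbase implementation:

package main

import (
	"fmt"
	"sync"
	"time"
)

// callLimiter is a hypothetical stand-in for utils.CallLimiter: it coalesces
// concurrent calls onto one execution and enforces a minimum pause between
// executions.
type callLimiter struct {
	pause time.Duration

	lock       sync.Mutex
	inProgress bool
	done       chan struct{}
	lastExec   time.Time
}

func newCallLimiter(pause time.Duration) *callLimiter {
	return &callLimiter{pause: pause}
}

// Do runs f, unless another call is already running, in which case it simply
// waits for that call to finish. Executions are spaced at least pause apart.
func (l *callLimiter) Do(f func()) {
	l.lock.Lock()
	if l.inProgress {
		// Someone else is already executing; wait for their result instead.
		wait := l.done
		l.lock.Unlock()
		<-wait
		return
	}
	// Become the executor.
	l.inProgress = true
	l.done = make(chan struct{})
	sleep := l.pause - time.Since(l.lastExec)
	l.lock.Unlock()

	// Respect the minimum interval between executions.
	if sleep > 0 {
		time.Sleep(sleep)
	}
	f()

	l.lock.Lock()
	l.lastExec = time.Now()
	l.inProgress = false
	close(l.done) // release all waiters
	l.lock.Unlock()
}

func main() {
	limiter := newCallLimiter(10 * time.Millisecond)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Calls that overlap coalesce onto a single fetch; a late caller
			// triggers at most one more, at least 10 ms later.
			limiter.Do(func() { fmt.Println("fetching socket tables") })
		}()
	}
	wg.Wait()
}

Under this model a burst of packets hitting exists() or lookup() at the same time costs at most one system-table fetch per 10 ms window, which is what makes the repeated updateTables() calls in the retry loops above affordable.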
