-
Notifications
You must be signed in to change notification settings - Fork 557
Closed
Description
kubernetes-client/javascript version: 0.12.0
node version: v14.9.0
The number of connections to the kubernetes API seems to double every 10 minutes. We've set up an endpoint informer to monitor when IPs change so we can update a service with external IPs. We use this to discover Cassandra cluster seeds in other regions.
function startEndpointWatcher (labelSelector = '', namespace = POD_NAMESPACE) {
  // Watches Endpoints objects in `namespace` matching `labelSelector` and
  // forwards add/update/delete events to updateEndpointsFn.
  const informer = k8s.makeInformer(kc, `/api/v1/namespaces/${namespace}/endpoints?labelSelector=${labelSelector}`,
    () => k8sApi.listNamespacedEndpoints(namespace, undefined, undefined, undefined, undefined, labelSelector))
  _.forEach(['add', 'update', 'delete'], event => informer.on(event, updateEndpointsFn))
  informer.on('error', err => {
    log.error('Watcher ERROR event: \n', err, '\nRestarting Watcher after 5 sec...')
    // Wrap the restart in an arrow function: passing `informer.start` directly
    // would detach the method from `informer` (losing `this`), and the returned
    // Promise would be left floating with no rejection handler.
    setTimeout(() => {
      informer.start()
        .catch(restartErr => log.error('HostIPs-Endpoint-watcher failed to restart: \n', restartErr))
    }, 5000)
  })
  informer.start()
    .then(() => log.info('HostIPs-Endpoint-watcher successfully started'))
    .catch(err => log.error('HostIPs-Endpoint-watcher failed to start: \n', err))
}
async function updateEndpointsFn (endpointsObj) {
  // Mirrors the watched Endpoints object into a companion
  // "<name> + HOSTIP_ENDPOINTS_SUFFIX" Endpoints resource, rewriting each
  // address's `ip` to the hosting node's hostIP (looked up from pod status).
  try {
    if (!endpointsObj.subsets) return
    const subsets = endpointsObj.subsets[0]
    const namespace = endpointsObj.metadata.namespace
    // Mutates `address` in place: replaces the pod IP with the node's hostIP.
    const getAddressWithHostIP = address =>
      k8sApi.readNamespacedPodStatus(address.hostname, namespace).then(res => {
        address.ip = res.body.status.hostIP
      })
    // No need for an `async x => await Promise.all(...)` wrapper —
    // returning the Promise.all directly is equivalent.
    const addressesUpdater = addresses => Promise.all(_.map(addresses, getAddressWithHostIP))
    // _.filter with no predicate drops a missing addresses/notReadyAddresses list.
    await Promise.all(_.filter([subsets.addresses, subsets.notReadyAddresses]).map(addressesUpdater))
    const endpointPatch = [{ op: 'replace', path: '/subsets', value: [subsets] }]
    // Await the patch so this handler only resolves once the write completes;
    // previously the Promise was floating, so completion/failure was invisible
    // to callers and ordering with subsequent events was not guaranteed.
    try {
      const result = await k8sApi.patchNamespacedEndpoints(
        endpointsObj.metadata.name + HOSTIP_ENDPOINTS_SUFFIX, namespace, endpointPatch,
        undefined, undefined, undefined, undefined,
        { headers: { 'Content-type': 'application/json-patch+json' } })
      log.info('Successfully patched hostIPs Endpoint: ' + result.body.metadata.name)
      log.debug(result.body)
      hostipsEndpoints = result.body
    } catch (err) {
      // Preserve the original best-effort behavior: log and continue.
      log.error('Endpoints patch error', err)
    }
  } catch (err) { log.error('UNEXPECTED ERROR: \n', err) }
}
I've tried explicitly closing every response that I have access to, with no luck, so I'm assuming the leak must be in the listNamespacedEndpoints call or somewhere inside the informer.
Here's logging from the pod:
9/4/2020, 6:08:41 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:08:41 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:18:41 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:18:41 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:18:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:18:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:28:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:28:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:28:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:28:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:28:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:28:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:28:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
9/4/2020, 6:28:42 PM INFO Successfully patched hostIPs Endpoint: cassandra-us-south-hostips (host-ips-service.js:57)
Here's netstat output:
❯ date; k exec cassandra-prober-647dc7c5c7-gvw2k -c prober -- netstat -al | grep kube
Fri Sep 4 14:26:30 EDT 2020
tcp 0 0 cassandra-prober-:40182 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:40098 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:40154 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:40126 kubernetes.default.:443 ESTABLISHED
❯ date; k exec cassandra-prober-647dc7c5c7-gvw2k -c prober -- netstat -al | grep kube
Fri Sep 4 14:30:16 EDT 2020
tcp 0 0 cassandra-prober-:43218 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:43420 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:43246 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:43356 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:43274 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:43392 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:43448 kubernetes.default.:443 ESTABLISHED
tcp 0 0 cassandra-prober-:43304 kubernetes.default.:443 ESTABLISHED
Any ideas as to what may be going on here?
allen-servedio
Metadata
Metadata
Assignees
Labels
No labels