Get timestamp from OSC bundle in CustomModule

In SuperCollider, you can send an OSC bundle with a timestamp (see the sendBundle method of NetAddr at https://doc.sccode.org/Classes/NetAddr.html). How can I unpack that timestamp in a custom module?

Currently you can't, but it could be exposed. Note that the bundle's timestamp is already interpreted by the server, which delays delivery of the message if the timestamp is set in the future.

I can see that now. Very good to know. Unfortunately, I am now realizing that this does not solve the problem I am trying to solve, which is to sync events across multiple clients to the future timestamp received. Is that possible?

Purpose: I am actually making a visual metronome and am hoping that each performer can use their own device. So they should be synced. And I can delay things as much as I like to account for whatever latency there might be on the network.

I am happy to make a new thread for this question if you prefer.

Currently there's nothing implemented to compensate for the latency of the different clients and synchronize the reception time of a message sent by the custom module. Timing-critical / real-time applications are considered out of the scope of Open Stage Control, but synchronized feedback across clients does make sense and seems achievable to some extent.

Here's a little custom module I wrote that provides a latency-compensated version of the receive() function. It works by getting a rough estimate of each client's latency based on its response time (measured every two seconds, though there is probably some fine-tuning to do there) and delaying each message according to the worst client latency. It's a bit of a naive approach and will likely suffer from unpredictable connectivity issues and device lags, but it's a starting point I guess :)

// Latency compensation for osc clients
// use receiveSync() instead of receive()
// to send synchronized messages to all clients

// keep track of connected clients
// use global object to keep the list
// from resetting when hot-reloading the custom module
global.clients = global.clients || {}
var clients = global.clients

app.on('open', (data, client)=>{
    if (!clients[client.id]) clients[client.id] = {
        latencies: [],
        latency: 0,
    }
})

app.on('close', (data, client)=>{
    delete clients[client.id]
})

function pingClients(){
    // ping clients and make them include their id and ping date in the reply
    var date = Date.now()
    for (let id in clients) {
        receive('/SCRIPT', `send('custom-module:null', '/pong', "${id}", ${date})`)
    }
}

function handleClientPong(id, date) {
    // client's ping response to measure their latency
    // estimated as half of the roundtrip latency
    if (clients[id]) {

        // accumulate latency measures
        // keep 5 latest values
        if (clients[id].latencies.unshift((Date.now() - date) / 2)  > 5) clients[id].latencies.pop()

        if (clients[id].latencies.length < 2)
            clients[id].latency = clients[id].latencies[0]
        else
            clients[id].latency = filterLatencies(clients[id].latencies)

    }
}

function filterLatencies(arr) {
    // As described at https://web.archive.org/web/20160310125700/http://mine-control.com/zack/timesync/timesync.html
    // with bits from https://github.com/enmasseio/timesync/blob/master/src/stat.js

    // compute median
    var sorted = arr.slice().sort((a,b)=> a > b ? 1 : a < b ? -1 : 0)
    var median
    if (sorted.length % 2 === 0)
        // even
        median = (sorted[arr.length / 2 - 1] + sorted[arr.length / 2]) / 2
    else
        // odd
        median = sorted[(arr.length - 1) / 2]


    // compute variance (average of the squared differences from the mean)
    // in the end divide by (arr.length - 1) because  we are working on
    // a sample and not on a full dataset (https://www.mathsisfun.com/data/standard-deviation.html)
    var mean = arr.reduce((a,b)=>a + b) / arr.length;
    var variance = arr.map(x => Math.pow(x - mean, 2))
                      .reduce((a,b)=>a + b) / (arr.length - 1)

    // if all values are the same, return the first one
    if (variance === 0) return arr[0]
  
    // compute standard deviation
    var stdDeviation = Math.sqrt(variance)

    // discard values above 1 standard-deviation from the median
    var filtered = arr.filter(x=>x < median + stdDeviation)

    // return arithmetic mean of remaining values
    return filtered.reduce((a,b)=>a + b) / filtered.length;

}


function receiveSync(host, port, address, ...args) {
    // Send a message to all clients with delays based on their measured latency

    // get worst client latency
    var maxLatency = 0
    for (let id in clients) {
        if (clients[id].latency > maxLatency) maxLatency = clients[id].latency
    }

    // delay message by the latency delta with the worst client
    for (let id in clients) {
        setTimeout(()=>{
            receive(host, port, address, ...args, {clientId: id})
        }, maxLatency - clients[id].latency)
    }
}

// Measure clients latencies every 2 seconds
setInterval(pingClients, 2000)



module.exports = {

    oscOutFilter:function(data){

        var {address, args, host, port} = data

        if (host === 'custom-module') {

            // handle client's ping response
            if (address === '/pong') handleClientPong(args[0].value, args[1].value)

            return // bypass original message
        }

        return {address, args, host, port}

    }

}
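
To use it, call receiveSync() wherever receive() would normally be used to push a value to the clients, passing a host and port along with the address, for instance from oscInFilter (the address and value below are purely illustrative):

// inside oscInFilter, with host and port taken from the incoming data
receiveSync(host, port, '/visual_click', 1)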

Dear Jean-Emmanuel... Thank you so, so much for the detailed and thoughtful response. I read up about such a technique and will try out what you have implemented. My hunch is that my application is too time critical for this, but I will definitely give it a go and it is a great resource for all users for sure.

I understand that such a feature might be out of scope, but it would be very helpful, I imagine. I could take a look at contributing if you pointed me to the pipeline for communication from the server to the clients. It seems like it would not be too hard to tell each client to schedule the received message against its own NTP time. But maybe I am being too optimistic.

I just edited my post as I had left a delay in the ping function for debugging purposes.

From what I understand (I'm no expert though), NTP seems quite complex and takes a significant amount of time to get things in sync accurately; I'm not sure it's the most appropriate for OSC-related purposes. The implementation above is very close to its simpler counterpart (SNTP), except that it focuses on the latency part and leaves the client clock out of the equation, which seems relevant here since things are driven from the server anyway. It may be improved with some smart averaging as described here: A Simple And Robust Time Synchronization Technique

Also an interesting read: How Does NTP Work? | Kevin Sookocheff

I just updated the code to add the aforementioned averaging technique.

Hi! This seems to be working sufficiently well for my purposes. However, something strange is happening. Even if I do not use receiveSync, it performs better than before if the ping interval is set low enough. I wonder if this has something to do with keeping the communication stream open. See my implementation below. The incoming message that gets distributed to the clients is handled by the if block for the address /playing in oscInFilter. Note that I changed the send to receive in your receiveSync function; admittedly, I am not completely clear on the difference between the two. I would have to send you the SuperCollider code for you to see it in action, and I am happy to do so if you think it would be helpful.

// Latency compensation for osc clients
// use receiveSync() instead of receive()
// to send synchronized messages to all clients

// keep track of connected clients
// use global object to keep the list
// from resetting when hot-reloading the custom module
global.clients = global.clients || {}
var clients = global.clients

app.on('open', (data, client)=>{
    if (!clients[client.id]) clients[client.id] = {
        latencies: [],
        latency: 0,
    }
})

app.on('close', (data, client)=>{
    delete clients[client.id]
})

function pingClients(){
    // ping clients and make them include their id and ping date in the reply
    var date = Date.now()
    for (let id in clients) {
        receive('/SCRIPT', `send('custom-module:null', '/pong', "${id}", ${date})`)
    }
}

function handleClientPong(id, date) {
    // client's ping response to measure their latency
    // estimated as half of the roundtrip latency
    if (clients[id]) {

        // accumulate latency measures
        // keep 5 latest values
        if (clients[id].latencies.unshift((Date.now() - date) / 2)  > 5) clients[id].latencies.pop()

        if (clients[id].latencies.length < 2)
            clients[id].latency = clients[id].latencies[0]
        else
            clients[id].latency = filterLatencies(clients[id].latencies)

    }
}

function filterLatencies(arr) {
    // As described at https://web.archive.org/web/20160310125700/http://mine-control.com/zack/timesync/timesync.html
    // with bits from https://github.com/enmasseio/timesync/blob/master/src/stat.js

    // compute median
    var sorted = arr.slice().sort((a,b)=> a > b ? 1 : a < b ? -1 : 0)
    var median
    if (sorted.length % 2 === 0)
        // even
        median = (sorted[arr.length / 2 - 1] + sorted[arr.length / 2]) / 2
    else
        // odd
        median = sorted[(arr.length - 1) / 2]


    // compute variance (average of the squared differences from the mean)
    // in the end divide by (arr.length - 1) because  we are working on
    // a sample and not on a full dataset (https://www.mathsisfun.com/data/standard-deviation.html)
    var mean = arr.reduce((a,b)=>a + b) / arr.length;
    var variance = arr.map(x => Math.pow(x - mean, 2))
                      .reduce((a,b)=>a + b) / (arr.length - 1)

    // compute standard deviation
    var stdDeviation = Math.sqrt(variance)

    // discard values above 1 standard-deviation from the median
    var filtered = arr.filter(x=>x < median + stdDeviation)

    // return arithmetic mean of remaining values
    return filtered.reduce((a,b)=>a + b) / filtered.length;

}


function receiveSync(address, ...args) {
    // Send a message to all clients with delays based on their measured latency

    // get worst client latency
    var maxLatency = 0
    for (let id in clients) {
        if (clients[id].latency > maxLatency) maxLatency = clients[id].latency
    }

    // delay message by the latency delta with the worst client
    for (let id in clients) {
        setTimeout(()=>{
            //send(host, port, address, ...args, {clientId: id})
            receive(address, ...args, {clientId: id})
        }, maxLatency - clients[id].latency)
    }
}

// Measure client latencies every 50 ms (lowered from 2 seconds for testing)
setInterval(pingClients, 50)



module.exports = {

    oscInFilter:function(data) {

      //console.log(data)

      var {host, port, address, args} = data

      if (address === '/playing') {
        //receive("/measure", args[0].value)
        //receive("/beat", args[1].value)
        //receive("/visual_click", 1)
        receiveSync("/measure", args[0].value)
        receiveSync("/beat", args[1].value)
        receiveSync("/visual_click", 1)
      }

      return

    },

    oscOutFilter:function(data){

        var {address, args, host, port} = data

        if (host === 'custom-module') {

            // handle client's ping response
            if (address === '/pong') handleClientPong(args[0].value, args[1].value)

            return // bypass original message
        }

        if (address.substring(1, 6) == 'mixer') {
            // repack /mixer/<ins>/<type>/<index> messages as a single /mixer message
            var tokens = address.split('/')
            var ins = tokens[2]
            var type = tokens[3]
            var index = tokens[4]
            var val = args[0].value
            args = [{type: 's', value: ins}, {type: 's', value: type}, {type: 's', value: index}, {type: 'f', value: val}]
            //console.log(data)
            address = '/mixer'
            return {host, port, address, args}
        }

        if (address === '/transport') {
            return {host, port, address, args}
        }

        return

    }

}


Oops, it was indeed supposed to be receive() here, not send(). send() sends a message outside of Open Stage Control while receive() simulates an incoming message and passes it to the clients.
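
A quick sketch to illustrate the difference (host, port, addresses and values are just placeholders):

// send(): the message leaves open stage control and goes to an external osc target
send('127.0.0.1', 57120, '/to-supercollider', 1)

// receive(): the message is treated as if it had arrived at the server
// and is dispatched to the connected clients
receive('/visual_click', 1)

// the clientId option restricts the simulated message to a single client
receive('/visual_click', 1, {clientId: id}) // id as tracked in the clients object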

Even if I do not use receiveSync, it performs better than before if the ping interval is set low enough. I wonder if this has something to do with keeping the communication stream open.

I guess that's not impossible; however, I couldn't find any source pointing toward this.

Interesting. I wonder what is up with the performance boost even without using receiveSync. FYI, I am testing against one client running on localhost and one on my phone across a local network. I wonder if the phone has some wake-up behavior for received packets. Then again, maybe the connection is just staying open, or if the next message arrives within the ping interval it gets scheduled in time.

Regardless, something is working better : ) I will keep testing as I develop the app and report if it remains sufficient for a synced metronome. Again, many thanks.

Circling back around to this after some more testing. There is one bug that I can identify, but I am not sure exactly how to handle the exception.

The code fails and disconnects the client when the latencies in a client's array are all equal. I assume this is because any latency that is not strictly below the median plus one standard deviation gets filtered out, and when all the values are the same the standard deviation is 0, so nothing passes the filter:

var filtered = arr.filter(x=>x < median + stdDeviation)

The subsequent reduce then fails since the filtered array is empty. Any clues on how to deal with this degenerate case are most welcome!
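
A minimal illustration of what I think is happening (arbitrary values):

var arr = [3, 3, 3, 3, 3]        // all measured latencies are equal
var median = 3, stdDeviation = 0 // variance is 0
var filtered = arr.filter(x => x < median + stdDeviation) // [] : nothing is strictly below the median
filtered.reduce((a, b) => a + b) // TypeError: Reduce of empty array with no initial value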

Thanks!

Woops :), I added a sanity check in my code (look for if (variance === 0))

I suppose the solution below works, assuming that you can just return the original array if the values are all the same. Do you think there would be any speed benefit to simply checking earlier on whether all values are the same? Also, can I start and stop the setInterval function based on when things are playing (i.e., the time-critical parts)?

function filterLatencies(arr) {
// As described at A Simple And Robust Time Synchronization Technique
// with bits from https://github.com/enmasseio/timesync/blob/master/src/stat.js

//console.log(arr)
// compute median
var sorted = arr.slice().sort((a,b)=> a > b ? 1 : a < b ? -1 : 0)
var median
if (sorted.length % 2 === 0)
    // even
    median = (sorted[arr.length / 2 - 1] + sorted[arr.length / 2]) / 2
else
    // odd
    median = sorted[(arr.length - 1) / 2]


// compute variance (average of the squared differences from the mean)
// in the end divide by (arr.length - 1) because  we are working on
// a sample and not on a full dataset (https://www.mathsisfun.com/data/standard-deviation.html)
var mean = arr.reduce((a,b)=>a + b) / arr.length;
var variance = arr.map(x => Math.pow(x - mean, 2))
                  .reduce((a,b)=>a + b) / (arr.length - 1)

if (variance === 0) {
  return arr
} else {
  // compute standard deviation
  var stdDeviation = Math.sqrt(variance)

  // discard values above 1 standard-deviation from the median
  var filtered = arr.filter(x=>x < median + stdDeviation)

  // return arithmetic mean of remaining values
  return filtered.reduce((a,b)=>a + b) / filtered.length;
}

}

Returning the array doesn't seem right since the function is supposed to return a number (see my implementation above).

Do you think there would be any speed benefit to simple checking earlier on if all values are the same?

I don't think the difference would be significant for such a small array (but only proper profiling will tell).

Also can I start and stop the setInterval function based on when things are playing (i.e., the time critical functions)?

Sounds doable, but this would be entering the world of heuristics (here be dragons!) and I'm not sure I'll be of much help there. Anyway, clearInterval() is the key function here. If you're concerned about bandwidth, a good test would be to first reset the interval to a lower rate (i.e., longer than 50ms) in order to check what the latency compensation in itself changes, without the possible "keep alive" performance boost (which I wouldn't rely on too much since it doesn't seem to be documented anywhere; it could well be specific to your testing environment).
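
For what it's worth, here's a minimal sketch of how the interval could be toggled; the /play_state address and its 1/0 argument are hypothetical, so wire it to whatever message marks the start and end of playback in your setup:

var pingTimer = null

function startPinging() {
    // avoid stacking several intervals on top of each other
    if (pingTimer === null) pingTimer = setInterval(pingClients, 2000)
}

function stopPinging() {
    if (pingTimer !== null) {
        clearInterval(pingTimer)
        pingTimer = null
    }
}

// in oscInFilter (hypothetical message):
// if (address === '/play_state') args[0].value ? startPinging() : stopPinging()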

Thanks for the catch and good point on all, especially

I will keep you posted!