Rendering snapshots of 3DTiles for list of known poses


First a bit of context: I am new to Cesium and I am working on a deep learning project where the goal is to estimate the position and orientation of a camera from pictures of a known scene (for each picture, estimate the camera pose at the moment it was taken). The idea is to train a neural network on a scene using “synthetic photos” and then use domain adaptation tricks to make it also work with real photos. The network will learn the geometry and characteristics of the scene and should then be able to output a pose for previously unseen images as well.

What we need to do with Cesium: We want to generate these synthetic photos from cesium, using a lidar point cloud colorized by an orthophoto. We have a list of wgs84 coordinates and orientations as an input and we want the corresponding synthetic photos + wgs84 3D coordinates of each pixel as an output. We already have a script that does this fine. It was a challenge for me to be able to generate geolocated 3D Tiles from the point cloud without manual alignment, but I finally managed using “entwine” tool.

My problem now is that my current way of generating the snapshots is really slow. It can take up to one minute to render a single view. When I manually move around the scene with the mouse, rendering happens much faster, which makes me think I am doing something wrong. My simplified code is the following:

// NOTE(review): the pasted code was garbled (statements fused, braces and
// receivers dropped). This is a reconstruction to valid syntax; lines marked
// "TODO confirm" are inferred from context and should be checked against the
// original script.
(function () {
    "use strict";

    // Fix the canvas size so every snapshot has an identical resolution.
    var container = document.getElementById('cesiumContainer');
    container.style.width = '640px';
    container.style.height = '480px';

    // Each pose: [longitude, latitude, height, heading, pitch, roll]
    // (degrees for angles — TODO confirm against the data source).
    var points = [/* list of poses */];

    var viewer = new Cesium.Viewer('cesiumContainer', {
        requestRenderMode: true,            // only render when the scene changes
        maximumRenderTimeChange: Infinity,  // never re-render for time-dependent effects
        terrainProvider: Cesium.createWorldTerrain(),
        imageryProvider: Cesium.createWorldImagery(),
        baseLayerPicker: false,
        resolutionScale: 1.0
    });

    var tilesetLidar = viewer.scene.primitives.add(
        new Cesium.Cesium3DTileset({
            url: 'http://localhost:8080/Source/EPFLCesium3DTiles/tileset.json',
            maximumScreenSpaceError: 4,
            maximumMemoryUsage: 16384,
            // Skip intermediate LODs and load the detail needed for the view directly.
            immediatelyLoadDesiredLevelOfDetail: true,
            loadSiblings: false
        })
    );

    var pics = true;   // whether to save a screenshot for each pose — TODO confirm
    var img_idx = 0;   // index of the pose currently being rendered

    // Save the current WebGL canvas content as img_<j>.png.
    // downloadURI is an external helper (not shown in this snippet).
    var takeScreenshot = function (j) {
        var canvas = viewer.scene.canvas;
        canvas.toBlob(function (blob) {
            var url = URL.createObjectURL(blob);
            downloadURI(url, "img_" + j.toString() + ".png");
        });
    };

    // Save a binary buffer (e.g. per-pixel WGS84 coordinates) as an .npy download.
    function downloadNpy(array, name) {
        var link = document.createElement("a");
        link.download = name;
        var file = new Blob([array], {
            type: "binary/octet-stream"
        });
        link.href = URL.createObjectURL(file);
        link.click();
    }

    // Move the camera to pose j, then wait for the tiles of the new view to load.
    function renderPic(j) {
        var point = points[j];
        // NOTE(review): these loops look like a workaround for inconsistently
        // scaled input coordinates (e.g. 46.5 stored as 465000) — confirm the
        // input format; a proper normalization upstream would be safer.
        var x = point[0]; while (x > 50) { x /= 10; }  // longitude
        var y = point[1]; while (y > 50) { y /= 10; }  // latitude
        var trueHeight = point[2];
        var position = Cesium.Cartesian3.fromDegrees(x, y, trueHeight);
        var orientation = Cesium.HeadingPitchRoll.fromDegrees(point[3], point[4], point[5]);
        viewer.scene.camera.setView({
            destination: position,
            orientation: {
                heading: orientation.heading,
                pitch: orientation.pitch,
                roll: orientation.roll
            }
        });
        var fov_deg = 84;  // DJI Phantom field of view
        viewer.scene.camera.frustum.fov = Cesium.Math.PI / 180 * fov_deg;
        setTimeout(function () {
            // If the view is already fully loaded, no tileLoadProgressEvent will
            // fire, so trigger the completion handler manually.
            if (viewer.scene.globe.tilesLoaded) {
                console.log('Bypassing');
                picRendered();
            }
        }, 500); // checking immediately does not work; wait 500 ms
    }

    // Called once all tiles for the current view are loaded:
    // save the picture and move on to the next pose.
    function picRendered() {
        console.log('Finished rendering picture number ' + img_idx);
        if (pics) takeScreenshot(img_idx);
        img_idx += 1;  // BUG in original paste: without this, renderPic loops on the same pose
        if (img_idx < points.length) renderPic(img_idx);
    }

    // Launch: every time tile loading progresses, check whether the view is complete.
    viewer.scene.globe.tileLoadProgressEvent.addEventListener(function () {
        if (viewer.scene.globe.tilesLoaded) picRendered();
    });
    renderPic(img_idx);
}());

So it works fine, but it is very slow, and I don't understand why rendering is fast when I move manually with the mouse yet takes much longer when I use camera.setView(). Any clue?

Thank you very much in advance for any help or suggestion.