/*
  2022-09-16

  The author disclaims copyright to this source code.  In place of a
  legal notice, here is a blessing:

  *   May you do good and not evil.
  *   May you find forgiveness for yourself and forgive others.
  *   May you share freely, never taking more than you give.

  ***********************************************************************

  A Worker which manages asynchronous OPFS handles on behalf of a
  synchronous API which controls it via a combination of Worker
  messages, SharedArrayBuffer, and Atomics. It is the asynchronous
  counterpart of the API defined in sqlite3-api-opfs.js.

  Highly indebted to:

  https://github.com/rhashimoto/wa-sqlite/blob/master/src/examples/OriginPrivateFileSystemVFS.js

  for demonstrating how to use the OPFS APIs.

  This file is to be loaded as a Worker. It does not have any direct
  access to the sqlite3 JS/WASM bits, so any bits which it needs (most
  notably SQLITE_xxx integer codes) have to be imported into it via an
  initialization process.

  This file represents an implementation detail of a larger piece of
  code, and not a public interface. Its details may change at any time
  and are not intended to be used by any client-level code.
*/
"use strict";
const toss = function(...args){throw new Error(args.join(' '))};
if(self.window === self){
  toss("This code cannot run from the main thread.",
       "Load it as a Worker from a separate Worker.");
}else if(!navigator.storage.getDirectory){
  toss("This API requires navigator.storage.getDirectory.");
}
/**
   Will hold state copied to this object from the synchronous side of
   this API.
*/
const state = Object.create(null);
/**
   verbose:

   0 = no logging output
   1 = only errors
   2 = warnings and errors
   3 = debug, warnings, and errors
*/
state.verbose = 2;

const loggers = {
  0:console.error.bind(console),
  1:console.warn.bind(console),
  2:console.log.bind(console)
};
const logImpl = (level,...args)=>{
  if(state.verbose>level) loggers[level]("OPFS asyncer:",...args);
};
const log =    (...args)=>logImpl(2, ...args);
const warn =   (...args)=>logImpl(1, ...args);
const error =  (...args)=>logImpl(0, ...args);
const metrics = Object.create(null);
metrics.reset = ()=>{
  let k;
  const r = (m)=>(m.count = m.time = m.wait = 0);
  for(k in state.opIds){
    r(metrics[k] = Object.create(null));
  }
  let s = metrics.s11n = Object.create(null);
  s = s.serialize = Object.create(null);
  s.count = s.time = 0;
  s = metrics.s11n.deserialize = Object.create(null);
  s.count = s.time = 0;
};
metrics.dump = ()=>{
  let k, n = 0, t = 0, w = 0;
  for(k in state.opIds){
    const m = metrics[k];
    n += m.count;
    t += m.time;
    w += m.wait;
    m.avgTime = (m.count && m.time) ? (m.time / m.count) : 0;
  }
  console.log(self.location.href,
              "metrics for",self.location.href,":\n",
              metrics,
              "\nTotal of",n,"op(s) for",t,"ms",
              "approx",w,"ms spent waiting on OPFS APIs.");
  console.log("Serialization metrics:",metrics.s11n);
};
/**
   Map of sqlite3_file pointers (integers) to metadata related to a
   given OPFS file handle. The pointers are, on this side of the
   interface, opaque file handle IDs provided by the synchronous
   part of this constellation. Each value is an object with a structure
   demonstrated in the xOpen() impl.
*/
const __openFiles = Object.create(null);
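/*
  Illustrative sketch (not executed, not authoritative): each value in
  __openFiles, as constructed in xOpen(), has roughly this shape. The
  concrete values below are hypothetical placeholders; a syncHandle
  property is attached lazily by getSyncHandle().

    __openFiles[fid] = {
      filenameAbs: "/dir/my.db",     // absolute OPFS path
      filenamePart: "my.db",         // final path component
      dirHandle: dirHandle,          // FileSystemDirectoryHandle of the containing dir
      fileHandle: fileHandle,        // FileSystemFileHandle of the file itself
      sabView: state.sabFileBufView, // shared I/O buffer view
      readOnly: false,               // derived from SQLITE_OPEN_READONLY
      deleteOnClose: false           // derived from SQLITE_OPEN_DELETEONCLOSE
      // syncHandle: added on demand by getSyncHandle()
    };
*/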
/**
   Expects an OPFS file path. The path gets resolved, such that ".."
   components are properly expanded, and is returned. If the 2nd arg is
   true, the result is returned as an array of path elements, else an
   absolute path string is returned.
*/
const getResolvedPath = function(filename,splitIt){
  const p = new URL(
    filename, 'file://irrelevant'
  ).pathname;
  return splitIt ? p.split('/').filter((v)=>!!v) : p;
};
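/*
  Illustrative examples (not executed), using hypothetical inputs:

    getResolvedPath("dir1/../dir2/my.db", false); // ==> "/dir2/my.db"
    getResolvedPath("dir1/../dir2/my.db", true);  // ==> ["dir2", "my.db"]
*/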
/**
   Takes the absolute path to a filesystem element. Returns an array
   of [handleOfContainingDir, filename]. If the 2nd argument is truthy
   then each directory element leading to the file is created along
   the way. Throws if any creation or resolution fails.
*/
const getDirForFilename = async function f(absFilename, createDirs = false){
  const path = getResolvedPath(absFilename, true);
  const filename = path.pop();
  let dh = state.rootDir;
  for(const dirName of path){
    if(dirName){
      dh = await dh.getDirectoryHandle(dirName, {create: !!createDirs});
    }
  }
  return [dh, filename];
};
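/*
  Illustrative example (not executed), using a hypothetical path:

    const [dirHandle, fname] = await getDirForFilename("/d1/d2/my.db", true);
    // dirHandle ==> handle of /d1/d2 (d1 and d2 created if missing)
    // fname     ==> "my.db"
*/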
/**
   Returns the sync access handle associated with the given file
   handle object (which must be a valid handle object, as created by
   xOpen()), lazily opening it if needed.

   In order to help alleviate cross-tab contention for a database,
   if an exception is thrown while acquiring the handle, this routine
   will wait briefly and try again, up to 3 times. If acquisition
   still fails at that point it will give up and propagate the
   exception.
*/
const getSyncHandle = async (fh)=>{
  if(!fh.syncHandle){
    const t = performance.now();
    log("Acquiring sync handle for",fh.filenameAbs);
    const maxTries = 3;
    let i = 1, ms = 300;
    for(; true; ms *= ++i){
      try {
        //if(i<3) toss("Just testing.");
        //TODO? A config option which tells it to throw here
        //randomly every now and then, for testing purposes.
        fh.syncHandle = await fh.fileHandle.createSyncAccessHandle();
        break;
      }catch(e){
        if(i === maxTries){
          toss("Error getting sync handle.",maxTries,
               "attempts failed. ",fh.filenameAbs, ":", e.message);
          throw e;
        }
        warn("Error getting sync handle. Waiting",ms,
              "ms and trying again.",fh.filenameAbs,e);
        Atomics.wait(state.sabOPView, state.opIds.retry, 0, ms);
      }
    }
    log("Got sync handle for",fh.filenameAbs,'in',performance.now() - t,'ms');
  }
  return fh.syncHandle;
};
/**
   If the given file-holding object has a sync handle attached to it,
   that handle is removed and asynchronously closed. Though it may
   sound sensible to continue work as soon as the close() returns
   (noting that it's asynchronous), doing so can cause operations
   performed soon afterwards, e.g. a call to getSyncHandle(), to fail
   because they may happen out of order from the close(). OPFS does
   not guarantee that the actual order of operations is retained in
   such cases, i.e. always "await" on the result of this function.
*/
const closeSyncHandle = async (fh)=>{
  if(fh.syncHandle){
    log("Closing sync handle for",fh.filenameAbs);
    const h = fh.syncHandle;
    delete fh.syncHandle;
    return h.close();
  }
};

/**
   Stores the given value at state.sabOPView[state.opIds.rc] and then
   Atomics.notify()'s it.
*/
const storeAndNotify = (opName, value)=>{
  log(opName+"() => notify(",state.opIds.rc,",",value,")");
  Atomics.store(state.sabOPView, state.opIds.rc, value);
  Atomics.notify(state.sabOPView, state.opIds.rc);
};

/**
   Throws if fh is a file-holding object which is flagged as read-only.
*/
const affirmNotRO = function(opName,fh){
  if(fh.readOnly) toss(opName+"(): File is read-only: "+fh.filenameAbs);
};
/**
   We track 2 different timers: the "metrics" timer records how much
   time we spend performing work. The "wait" timer records how much
   time we spend waiting on the underlying OPFS APIs. See the calls
   to mTimeStart(), mTimeEnd(), wTimeStart(), and wTimeEnd()
   throughout this file to see how they're used.
*/
const __mTimer = Object.create(null);
__mTimer.op = undefined;
__mTimer.start = undefined;
const mTimeStart = (op)=>{
  __mTimer.start = performance.now();
  __mTimer.op = op;
  //metrics[op] || toss("Maintenance required: missing metrics for",op);
  ++metrics[op].count;
};
const mTimeEnd = ()=>(
  metrics[__mTimer.op].time += performance.now() - __mTimer.start
);
const __wTimer = Object.create(null);
__wTimer.op = undefined;
__wTimer.start = undefined;
const wTimeStart = (op)=>{
  __wTimer.start = performance.now();
  __wTimer.op = op;
  //metrics[op] || toss("Maintenance required: missing metrics for",op);
};
const wTimeEnd = ()=>(
  metrics[__wTimer.op].wait += performance.now() - __wTimer.start
);
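/*
  Illustrative sketch (not executed) of how the two timers bracket a
  typical op handler in this file: the metrics timer wraps the whole
  op, the wait timer wraps only the OPFS calls. 'xSomething' is a
  hypothetical op name; real handlers use keys from state.opIds.

    mTimeStart('xSomething');          // op-level metrics timer
    wTimeStart('xSomething');          // OPFS-wait timer
    // ... await OPFS API calls ...
    wTimeEnd();
    storeAndNotify('xSomething', rc);  // publish the result code
    mTimeEnd();
*/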
/**
   Gets set to true by the 'opfs-async-shutdown' command to quit the
   wait loop. This is only intended for debugging purposes: we cannot
   inspect this file's state while the tight waitLoop() is running and
   need a way to stop that loop for introspection purposes.
*/
let flagAsyncShutdown = false;


/**
   Asynchronous wrappers for sqlite3_vfs and sqlite3_io_methods
   methods, as well as helpers like mkdir(). Maintenance reminder:
   members are in alphabetical order to simplify finding them.
*/
const vfsAsyncImpls = {
  'opfs-async-metrics': async ()=>{
    mTimeStart('opfs-async-metrics');
    metrics.dump();
    storeAndNotify('opfs-async-metrics', 0);
    mTimeEnd();
  },
  'opfs-async-shutdown': async ()=>{
    flagAsyncShutdown = true;
    storeAndNotify('opfs-async-shutdown', 0);
  },
  mkdir: async (dirname)=>{
    mTimeStart('mkdir');
    let rc = 0;
    wTimeStart('mkdir');
    try {
      await getDirForFilename(dirname+"/filepart", true);
    }catch(e){
      state.s11n.storeException(2,e);
      rc = state.sq3Codes.SQLITE_IOERR;
    }finally{
      wTimeEnd();
    }
    storeAndNotify('mkdir', rc);
    mTimeEnd();
  },
  xAccess: async (filename)=>{
    mTimeStart('xAccess');
    /* OPFS cannot support the full range of xAccess() queries sqlite3
       calls for. We can essentially just tell whether the file is
       accessible, but if it is accessible then it is automatically
       writable (unless it's locked, which we cannot(?) know without
       trying to open it). OPFS does not have the notion of read-only.

       The return semantics of this function differ from sqlite3's
       xAccess semantics because we are limited in what we can
       communicate back to our synchronous communication partner: 0 =
       accessible, non-0 means not accessible.
    */
    let rc = 0;
    wTimeStart('xAccess');
    try{
      const [dh, fn] = await getDirForFilename(filename);
      await dh.getFileHandle(fn);
    }catch(e){
      state.s11n.storeException(2,e);
      rc = state.sq3Codes.SQLITE_IOERR;
    }finally{
      wTimeEnd();
    }
    storeAndNotify('xAccess', rc);
    mTimeEnd();
  },
  xClose: async function(fid/*sqlite3_file pointer*/){
    const opName = 'xClose';
    mTimeStart(opName);
    const fh = __openFiles[fid];
    let rc = 0;
    wTimeStart('xClose');
    if(fh){
      delete __openFiles[fid];
      await closeSyncHandle(fh);
      if(fh.deleteOnClose){
        try{ await fh.dirHandle.removeEntry(fh.filenamePart) }
        catch(e){ warn("Ignoring dirHandle.removeEntry() failure of",fh,e) }
      }
    }else{
      state.s11n.serialize();
      rc = state.sq3Codes.SQLITE_NOTFOUND;
    }
    wTimeEnd();
    storeAndNotify(opName, rc);
    mTimeEnd();
  },
  xDelete: async function(...args){
    mTimeStart('xDelete');
    const rc = await vfsAsyncImpls.xDeleteNoWait(...args);
    storeAndNotify('xDelete', rc);
    mTimeEnd();
  },
  xDeleteNoWait: async function(filename, syncDir = 0, recursive = false){
    /* The syncDir flag is, for purposes of the VFS API's semantics,
       ignored here. However, if it has the value 0x1234 then: after
       deleting the given file, recursively try to delete any empty
       directories left behind in its wake (ignoring any errors and
       stopping at the first failure).

       That said: we don't know for sure that removeEntry() fails if
       the dir is not empty because the API is not documented. It has,
       however, a "recursive" flag which defaults to false, so
       presumably it will fail if the dir is not empty and that flag
       is false.
    */
    let rc = 0;
    wTimeStart('xDelete');
    try {
      while(filename){
        const [hDir, filenamePart] = await getDirForFilename(filename, false);
        if(!filenamePart) break;
        await hDir.removeEntry(filenamePart, {recursive});
        if(0x1234 !== syncDir) break;
        filename = getResolvedPath(filename, true);
        filename.pop();
        filename = filename.join('/');
      }
    }catch(e){
      state.s11n.storeException(2,e);
      rc = state.sq3Codes.SQLITE_IOERR_DELETE;
    }
    wTimeEnd();
    return rc;
  },
  xFileSize: async function(fid/*sqlite3_file pointer*/){
    mTimeStart('xFileSize');
    const fh = __openFiles[fid];
    let sz;
    wTimeStart('xFileSize');
    try{
      sz = await (await getSyncHandle(fh)).getSize();
      state.s11n.serialize(Number(sz));
      sz = 0;
    }catch(e){
      state.s11n.storeException(2,e);
      sz = state.sq3Codes.SQLITE_IOERR;
    }
    wTimeEnd();
    storeAndNotify('xFileSize', sz);
    mTimeEnd();
  },
  xLock: async function(fid/*sqlite3_file pointer*/,
                        lockType/*SQLITE_LOCK_...*/){
    mTimeStart('xLock');
    const fh = __openFiles[fid];
    let rc = 0;
    if( !fh.syncHandle ){
      wTimeStart('xLock');
      try { await getSyncHandle(fh) }
      catch(e){
        state.s11n.storeException(1,e);
        rc = state.sq3Codes.SQLITE_IOERR;
      }
      wTimeEnd();
    }
    storeAndNotify('xLock',rc);
    mTimeEnd();
  },
  xOpen: async function(fid/*sqlite3_file pointer*/, filename,
                        flags/*SQLITE_OPEN_...*/){
    const opName = 'xOpen';
    mTimeStart(opName);
    const deleteOnClose = (state.sq3Codes.SQLITE_OPEN_DELETEONCLOSE & flags);
    const create = (state.sq3Codes.SQLITE_OPEN_CREATE & flags);
    wTimeStart('xOpen');
    try{
      let hDir, filenamePart;
      try {
        [hDir, filenamePart] = await getDirForFilename(filename, !!create);
      }catch(e){
        storeAndNotify(opName, state.sq3Codes.SQLITE_NOTFOUND);
        mTimeEnd();
        wTimeEnd();
        return;
      }
      const hFile = await hDir.getFileHandle(filenamePart, {create});
      /**
         wa-sqlite, at this point, grabs a SyncAccessHandle and
         assigns it to the syncHandle prop of the file state
         object, but only for certain cases and it's unclear why it
         places that limitation on it.
      */
      wTimeEnd();
      __openFiles[fid] = Object.assign(Object.create(null),{
        filenameAbs: filename,
        filenamePart: filenamePart,
        dirHandle: hDir,
        fileHandle: hFile,
        sabView: state.sabFileBufView,
        readOnly: create
          ? false : (state.sq3Codes.SQLITE_OPEN_READONLY & flags),
        deleteOnClose: deleteOnClose
      });
      storeAndNotify(opName, 0);
    }catch(e){
      wTimeEnd();
      error(opName,e);
      state.s11n.storeException(1,e);
      storeAndNotify(opName, state.sq3Codes.SQLITE_IOERR);
    }
    mTimeEnd();
  },
  xRead: async function(fid/*sqlite3_file pointer*/,n,offset64){
    mTimeStart('xRead');
    let rc = 0, nRead;
    const fh = __openFiles[fid];
    try{
      wTimeStart('xRead');
      nRead = (await getSyncHandle(fh)).read(
        fh.sabView.subarray(0, n),
        {at: Number(offset64)}
      );
      wTimeEnd();
      if(nRead < n){/* Zero-fill remaining bytes */
        fh.sabView.fill(0, nRead, n);
        rc = state.sq3Codes.SQLITE_IOERR_SHORT_READ;
      }
    }catch(e){
      if(undefined===nRead) wTimeEnd();
      error("xRead() failed",e,fh);
      state.s11n.storeException(1,e);
      rc = state.sq3Codes.SQLITE_IOERR_READ;
    }
    storeAndNotify('xRead',rc);
    mTimeEnd();
  },
  xSync: async function(fid/*sqlite3_file pointer*/,flags/*ignored*/){
    mTimeStart('xSync');
    const fh = __openFiles[fid];
    let rc = 0;
    if(!fh.readOnly && fh.syncHandle){
      try {
        wTimeStart('xSync');
        await fh.syncHandle.flush();
      }catch(e){
        state.s11n.storeException(2,e);
      }
      wTimeEnd();
    }
    storeAndNotify('xSync',rc);
    mTimeEnd();
  },
  xTruncate: async function(fid/*sqlite3_file pointer*/,size){
    mTimeStart('xTruncate');
    let rc = 0;
    const fh = __openFiles[fid];
    wTimeStart('xTruncate');
    try{
      affirmNotRO('xTruncate', fh);
      await (await getSyncHandle(fh)).truncate(size);
    }catch(e){
      error("xTruncate():",e,fh);
      state.s11n.storeException(2,e);
      rc = state.sq3Codes.SQLITE_IOERR_TRUNCATE;
    }
    wTimeEnd();
    storeAndNotify('xTruncate',rc);
    mTimeEnd();
  },
  xUnlock: async function(fid/*sqlite3_file pointer*/,
                          lockType/*SQLITE_LOCK_...*/){
    mTimeStart('xUnlock');
    let rc = 0;
    const fh = __openFiles[fid];
    if( state.sq3Codes.SQLITE_LOCK_NONE===lockType
        && fh.syncHandle ){
      wTimeStart('xUnlock');
      try { await closeSyncHandle(fh) }
      catch(e){
        state.s11n.storeException(1,e);
        rc = state.sq3Codes.SQLITE_IOERR;
      }
      wTimeEnd();
    }
    storeAndNotify('xUnlock',rc);
    mTimeEnd();
  },
  xWrite: async function(fid/*sqlite3_file pointer*/,n,offset64){
    mTimeStart('xWrite');
    let rc;
    const fh = __openFiles[fid];
    wTimeStart('xWrite');
    try{
      affirmNotRO('xWrite', fh);
      rc = (
        n === (await getSyncHandle(fh))
          .write(fh.sabView.subarray(0, n),
                 {at: Number(offset64)})
      ) ? 0 : state.sq3Codes.SQLITE_IOERR_WRITE;
    }catch(e){
      error("xWrite():",e,fh);
      state.s11n.storeException(1,e);
      rc = state.sq3Codes.SQLITE_IOERR_WRITE;
    }
    wTimeEnd();
    storeAndNotify('xWrite',rc);
    mTimeEnd();
  }
}/*vfsAsyncImpls*/;

const initS11n = ()=>{
  /**
     ACHTUNG: this code is 100% duplicated in the other half of this
     proxy! The documentation is maintained in the "synchronous half".
  */
  if(state.s11n) return state.s11n;
  const textDecoder = new TextDecoder(),
  textEncoder = new TextEncoder('utf-8'),
  viewU8 = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize),
  viewDV = new DataView(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
  state.s11n = Object.create(null);
  const TypeIds = Object.create(null);
  TypeIds.number  = { id: 1, size: 8, getter: 'getFloat64', setter: 'setFloat64' };
  TypeIds.bigint  = { id: 2, size: 8, getter: 'getBigInt64', setter: 'setBigInt64' };
  TypeIds.boolean = { id: 3, size: 4, getter: 'getInt32', setter: 'setInt32' };
  TypeIds.string =  { id: 4 };
  const getTypeId = (v)=>(
    TypeIds[typeof v]
      || toss("Maintenance required: this value type cannot be serialized.",v)
  );
  const getTypeIdById = (tid)=>{
    switch(tid){
      case TypeIds.number.id: return TypeIds.number;
      case TypeIds.bigint.id: return TypeIds.bigint;
      case TypeIds.boolean.id: return TypeIds.boolean;
      case TypeIds.string.id: return TypeIds.string;
      default: toss("Invalid type ID:",tid);
    }
  };
  state.s11n.deserialize = function(){
    ++metrics.s11n.deserialize.count;
    const t = performance.now();
    const argc = viewU8[0];
    const rc = argc ? [] : null;
    if(argc){
      const typeIds = [];
      let offset = 1, i, n, v;
      for(i = 0; i < argc; ++i, ++offset){
        typeIds.push(getTypeIdById(viewU8[offset]));
      }
      for(i = 0; i < argc; ++i){
        const t = typeIds[i];
        if(t.getter){
          v = viewDV[t.getter](offset, state.littleEndian);
          offset += t.size;
        }else{/*String*/
          n = viewDV.getInt32(offset, state.littleEndian);
          offset += 4;
          v = textDecoder.decode(viewU8.slice(offset, offset+n));
          offset += n;
        }
        rc.push(v);
      }
    }
    //log("deserialize:",argc, rc);
    metrics.s11n.deserialize.time += performance.now() - t;
    return rc;
  };
  state.s11n.serialize = function(...args){
    const t = performance.now();
    ++metrics.s11n.serialize.count;
    if(args.length){
      //log("serialize():",args);
      const typeIds = [];
      let i = 0, offset = 1;
      viewU8[0] = args.length & 0xff /* header = # of args */;
      for(; i < args.length; ++i, ++offset){
        /* Write the TypeIds.id value into the next args.length
           bytes. */
        typeIds.push(getTypeId(args[i]));
        viewU8[offset] = typeIds[i].id;
      }
      for(i = 0; i < args.length; ++i) {
        /* Write each argument's value into the following bytes based
           on its corresponding TypeIds.id from the header. */
        const t = typeIds[i];
        if(t.setter){
          viewDV[t.setter](offset, args[i], state.littleEndian);
          offset += t.size;
        }else{/*String*/
          const s = textEncoder.encode(args[i]);
          viewDV.setInt32(offset, s.byteLength, state.littleEndian);
          offset += 4;
          viewU8.set(s, offset);
          offset += s.byteLength;
        }
      }
      //log("serialize() result:",viewU8.slice(0,offset));
    }else{
      viewU8[0] = 0;
    }
    metrics.s11n.serialize.time += performance.now() - t;
  };

  state.s11n.storeException = state.asyncS11nExceptions
    ? ((priority,e)=>{
      if(priority<=state.asyncS11nExceptions){
        state.s11n.serialize(e.message);
      }
    })
    : ()=>{};

  return state.s11n;
}/*initS11n()*/;
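/*
  Illustrative sketch (not executed) of the s11n buffer layout written
  by state.s11n.serialize() for a hypothetical call:

    state.s11n.serialize(3, "hi");

    byte 0      : 0x02              (argument count)
    byte 1      : 0x01              (type id: number)
    byte 2      : 0x04              (type id: string)
    bytes 3-10  : Float64 value 3
    bytes 11-14 : Int32 string byte length (2)
    bytes 15-16 : UTF-8 bytes of "hi"

  deserialize() reads the same layout back as [3, "hi"]. Multi-byte
  fields honor state.littleEndian.
*/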
const waitLoop = async function f(){
  const opHandlers = Object.create(null);
  for(let k of Object.keys(state.opIds)){
    const vi = vfsAsyncImpls[k];
    if(!vi) continue;
    const o = Object.create(null);
    opHandlers[state.opIds[k]] = o;
    o.key = k;
    o.f = vi;
  }
  /**
     waitTime is how long (ms) to wait for each Atomics.wait().
     We need to wake up periodically to give the thread a chance
     to do other things.
  */
  const waitTime = 1000;
  while(!flagAsyncShutdown){
    try {
      if('timed-out'===Atomics.wait(
        state.sabOPView, state.opIds.whichOp, 0, waitTime
      )){
        continue;
      }
      const opId = Atomics.load(state.sabOPView, state.opIds.whichOp);
      Atomics.store(state.sabOPView, state.opIds.whichOp, 0);
      const hnd = opHandlers[opId] ?? toss("No waitLoop handler for whichOp #",opId);
      const args = state.s11n.deserialize() || [];
      state.s11n.serialize(/* clear s11n to keep the caller from
                              confusing this with an exception string
                              written by the upcoming operation */);
      //warn("waitLoop() whichOp =",opId, hnd, args);
      if(hnd.f) await hnd.f(...args);
      else error("Missing callback for opId",opId);
    }catch(e){
      error('in waitLoop():',e);
    }
  }
};
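/*
  Illustrative sketch (not executed, not authoritative) of the
  handshake waitLoop() expects from its synchronous counterpart for a
  single op; the definitive implementation lives in the synchronous
  half (sqlite3-api-opfs.js):

    1. Serialize the op's arguments into the s11n region of sabIO.
    2. Atomics.store() the op's id into sabOPView[opIds.whichOp] and
       Atomics.notify() that slot to wake this loop.
    3. Atomics.wait() on sabOPView[opIds.rc] until storeAndNotify()
       publishes the op's result code there.
    4. Read any result values (or a stored exception message) back out
       of the s11n region.
*/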
navigator.storage.getDirectory().then(function(d){
  const wMsg = (type)=>postMessage({type});
  state.rootDir = d;
  self.onmessage = function({data}){
    switch(data.type){
        case 'opfs-async-init':{
          /* Receive shared state from synchronous partner */
          const opt = data.args;
          state.littleEndian = opt.littleEndian;
          state.asyncS11nExceptions = opt.asyncS11nExceptions;
          state.verbose = opt.verbose ?? 2;
          state.fileBufferSize = opt.fileBufferSize;
          state.sabS11nOffset = opt.sabS11nOffset;
          state.sabS11nSize = opt.sabS11nSize;
          state.sabOP = opt.sabOP;
          state.sabOPView = new Int32Array(state.sabOP);
          state.sabIO = opt.sabIO;
          state.sabFileBufView = new Uint8Array(state.sabIO, 0, state.fileBufferSize);
          state.sabS11nView = new Uint8Array(state.sabIO, state.sabS11nOffset, state.sabS11nSize);
          state.opIds = opt.opIds;
          state.sq3Codes = opt.sq3Codes;
          Object.keys(vfsAsyncImpls).forEach((k)=>{
            if(!Number.isFinite(state.opIds[k])){
              toss("Maintenance required: missing state.opIds[",k,"]");
            }
          });
          initS11n();
          metrics.reset();
          log("init state",state);
          wMsg('opfs-async-inited');
          waitLoop();
          break;
        }
        case 'opfs-async-restart':
          if(flagAsyncShutdown){
            warn("Restarting after opfs-async-shutdown. Might or might not work.");
            flagAsyncShutdown = false;
            waitLoop();
          }
          break;
        case 'opfs-async-metrics':
          metrics.dump();
          break;
    }
  };
  wMsg('opfs-async-loaded');
}).catch((e)=>error("error initializing OPFS asyncer:",e));