##// END OF EJS Templates
transaction: add a `hasfinalize` method...
marmoute -
r44508:8e095512 default
parent child Browse files
Show More
@@ -1,712 +1,718 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import errno
17 17
18 18 from .i18n import _
19 19 from . import (
20 20 error,
21 21 pycompat,
22 22 util,
23 23 )
24 24 from .utils import stringutil
25 25
# on-disk format version of the journal / backup-journal files; bumped when
# the serialization written by this module changes incompatibly
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {b'bookmarks', b'dirstate'}

# group identifiers passed to transaction._generatefiles() to select which
# registered file generators run relative to the finalize callbacks
gengroupall = b'all'
gengroupprefinalize = b'prefinalize'
gengrouppostfinalize = b'postfinalize'
36 36
37 37
def active(func):
    """Decorator guarding transaction methods against use after release.

    The wrapped method raises ``error.Abort`` when the transaction's
    reference count has dropped to zero, i.e. it was already
    committed or aborted.
    """

    def _active(self, *args, **kwds):
        if not self._count:
            msg = _(
                b'cannot use transaction when it is already committed/aborted'
            )
            raise error.Abort(msg)
        return func(self, *args, **kwds)

    return _active
49 49
50 50
def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """Replay a journal to undo an interrupted or aborted transaction.

    * `journal`: path of the journal file (relative to `opener`)
    * `report`: callable used to emit progress/warning messages
    * `opener`: vfs for the main store content
    * `vfsmap`: {location -> vfs} map for entries outside the main opener
    * `entries`: (file, offset, data) triples; each file is truncated back
      to `offset`
    * `backupentries`: (location, file, backupfile, cache) tuples; backup
      copies are restored over the current files
    * `unlink`: when True, files recorded at offset 0 are removed entirely
    * `checkambigfiles`: set of (path, vfs-location) tuples for which file
      stat ambiguity must be avoided when rewriting
    """
    # first pass: truncate (or remove) the append-only files
    for f, o, _ignore in entries:
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    # the file is shorter than the journal says it should
                    # be: truncating would not restore anything sensible
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # offset 0 and unlink requested: the file did not exist before
            # the transaction, so drop it; a missing file is fine
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                if inst.errno != errno.ENOENT:
                    raise

    # second pass: restore backed-up files and delete temporary ones
    backupfiles = []
    for l, f, b, c in backupentries:
        # NOTE(review): this only reports the unknown location and then
        # still indexes vfsmap[l] below — presumably unknown locations can
        # only occur for cache entries (c true); confirm against callers
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                # a real file with a backup: copy the backup over it
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append(b)
                except IOError:
                    report(_(b"failed to recover %s\n") % f)
            else:
                # either a temporary file (no `f`) or a file that did not
                # exist at backup time (no `b`): remove whichever is set
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort):
            # cache files are best-effort; anything else must propagate
            if not c:
                raise

    # everything is restored: remove the journal files themselves
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup file remains, it is safe to ignore any error
        pass
123 123
124 124
class transaction(util.transactional):
    """A journaled, rollback-able group of repository writes.

    Writes registered through ``add``/``addbackup`` are recorded in an
    on-disk journal so they can be truncated or restored if the process
    fails before ``close()``.  Transactions nest by reference counting
    (``nest``/``release``); only the outermost ``close`` commits.
    """

    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit user-facing messages
        * `opener`: vfs for the store content (also the default vfsmap entry)
        * `vfsmap`: {location -> vfs} for files outside the main opener
        * `journalname`: path of the journal file to create
        * `undoname`: if set, backups are also kept for a later "undo"
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)
        * `name`: native string used in ``repr`` for debugging

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        # open the journal first: from here on __del__/_abort can replay it
        self._file = opener.open(self._journal, b"w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        # nested names are joined so the repr shows the whole nesting stack
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        # a live journal at GC time means the transaction was never
        # closed/aborted explicitly: roll it back
        if self._journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self._map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        # flush so the journal is durable before the data file is touched
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup

        Raises ProgrammingError when called between startgroup/endgroup.
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "file did not exist" for playback
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        # empty 'path' marks the entry as temporary (see backupentries note)
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self, genid, filenames, genfunc, order=0, location=b''
    ):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=gengroupall):
        # write files registered for generation
        # returns True when at least one generator was registered (note:
        # registered, not necessarily run for this `group`)
        any = False
        for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (
                group != gengroupall
                and (id in postfinalizegenerators) != postfinalize
            ):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # '.pending' variant: temporary, removed at exit
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def find(self, file):
        """return the journal or backup entry recorded for `file`, or None"""
        if file in self._map:
            return self._entries[self._map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue

        Raises KeyError when `file` was never added.
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        # append the new offset to the journal; playback uses it directly
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        """enter a nested scope: bump refcounts and return self"""
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        """drop one usage; abort if all users released without closing"""
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        # True while the transaction is neither committed nor aborted
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check whether a finalize callback already exists for a category
        """
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            # outermost close: validate, then run the finalize pipeline
            # (prefinalize generators -> finalize callbacks -> postfinalize
            # generators, e.g. bookmarks/dirstate which need the changelog)
            self._validator(self)  # will raise exception if needed
            self._validator = None  # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self._count -= 1
        if self._count != 0:
            # still nested: the outermost close does the real work
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._entries = []
        # preserve backups for a later "undo" before deleting them
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        # second pass: remove the backup copies themselves
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open(
            b"%s.backupfiles" % self._undoname, b'w'
        )
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup existed: record an empty undo path
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                # rename journal-prefixed backups to undo-prefixed copies
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        """roll back everything recorded in the journal and clean up"""
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self._entries and not self._backupentries:
                # nothing was written: just drop the (empty) journal files
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    self._entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                # rollback itself failed: report, but never raise out of
                # _abort — `hg recover` is the user's escape hatch
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
651 657
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    # parse the journal: one "path\0offset" pair per line
    journalfp = opener.open(file)
    journallines = journalfp.readlines()
    journalfp.close()
    for journalline in journallines:
        try:
            path, offset = journalline.split(b'\0')
            entries.append((path, int(offset), None))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n")
                % pycompat.bytestr(journalline)
            )

    # parse the companion backup journal, when present and of our version
    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        backupfp = opener.open(backupjournal)
        backuplines = backupfp.readlines()
        if backuplines:
            if backuplines[0][:-1] == (b'%d' % version):
                for backupline in backuplines[1:]:
                    if not backupline:
                        continue
                    # Shave off the trailing newline
                    backupline = backupline[:-1]
                    loc, path, backup, cache = backupline.split(b'\0')
                    # NOTE(review): bool(cache) is True for any non-empty
                    # field, including b'0' — kept as-is for compatibility
                    backupentries.append((loc, path, backup, bool(cache)))
            else:
                report(
                    _(
                        b"journal was created by a different version of "
                        b"Mercurial\n"
                    )
                )

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now