##// END OF EJS Templates
typing: disable an attribute-error warning in the journal extension...
Matt Harbison -
r50755:d5116e4d default
parent child Browse files
Show More
@@ -1,603 +1,607 b''
1 1 # journal.py
2 2 #
3 3 # Copyright 2014-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """track previous positions of bookmarks (EXPERIMENTAL)
8 8
9 9 This extension adds a new command: `hg journal`, which shows you where
10 10 bookmarks were previously located.
11 11
12 12 """
13 13
14 14
15 15 import collections
16 16 import os
17 17 import weakref
18 18
19 19 from mercurial.i18n import _
20 20 from mercurial.node import (
21 21 bin,
22 22 hex,
23 23 )
24 24
25 25 from mercurial import (
26 26 bookmarks,
27 27 cmdutil,
28 28 dispatch,
29 29 encoding,
30 30 error,
31 31 extensions,
32 32 hg,
33 33 localrepo,
34 34 lock,
35 35 logcmdutil,
36 36 pycompat,
37 37 registrar,
38 38 util,
39 39 )
40 40 from mercurial.utils import (
41 41 dateutil,
42 42 procutil,
43 43 stringutil,
44 44 )
45 45
cmdtable = {}
command = registrar.command(cmdtable)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# storage format version; increment when the format changes
storageversion = 0

# namespaces
# entry namespaces recorded by this extension (see journalentry.namespace)
bookmarktype = b'bookmark'
wdirparenttype = b'wdirparent'
# In a shared repository, what shared feature name is used
# to indicate this namespace is shared with the source?
sharednamespaces = {
    bookmarktype: hg.sharedbookmarks,
}
66 66
# Journal recording, register hooks and storage object
def extsetup(ui):
    """Install the wrappers that feed journal events into the storage.

    Runs once at extension load; all wrapped targets are independent
    registrations, nothing executes here beyond wrapping.
    """
    # (target, attribute, wrapper) triples handled by wrapfunction
    for target, attr, wrapper in [
        (dispatch, b'runcommand', runcommand),
        (bookmarks.bmstore, b'_write', recordbookmarks),
        (hg, b'postshare', wrappostshare),
        (hg, b'copystore', unsharejournal),
    ]:
        extensions.wrapfunction(target, attr, wrapper)
    # dirstate is a filecache'd property, so it needs wrapfilecache
    extensions.wrapfilecache(
        localrepo.localrepository, b'dirstate', wrapdirstate
    )
76 76
77 77
def reposetup(ui, repo):
    """Attach a journalstorage to every local repository."""
    if not repo.local():
        return

    repo.journal = journalstorage(repo)
    # the journal file uses its own lock, not the wlock
    repo._wlockfreeprefix.add(b'namejournal')

    dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
    if cached:
        # a dirstate instantiated before this point predates our
        # wrapdirstate() wrapper taking effect, even though
        # repo.dirstate() was already wrapped; hook it up by hand
        _setupdirstate(repo, dirstate)
89 89
90 90
def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    # capture the full argument list before dispatch so entries written
    # during this command can reference it (stored on the class, since
    # there may not be a local repo yet)
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)
95 95
96 96
def _setupdirstate(repo, dirstate):
    # expose the repo's journal on the dirstate, then register for
    # parent-change notifications (recorded as b'wdirparent' entries)
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback(b'journal', recorddirstateparents)
100 100
101 101
# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    wrapped = orig(repo)
    # repo.journal only exists on local repos set up by reposetup()
    if util.safehasattr(repo, 'journal'):
        _setupdirstate(repo, wrapped)
    return wrapped
109 109
110 110
def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal."""
    old = list(old)
    new = list(new)
    if not util.safehasattr(dirstate, 'journalstorage'):
        return
    nullid = dirstate._nodeconstants.nullid
    # record a single hash unless the second parent is set (i.e. a merge)
    oldhashes = old if old[1] != nullid else old[:1]
    newhashes = new if new[1] != nullid else new[:1]
    dirstate.journalstorage.record(
        wdirparenttype, b'.', oldhashes, newhashes
    )
122 122
123 123
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        # re-read the on-disk bookmarks to diff against the pending store
        previous = bookmarks.bmstore(repo)
        for mark, newvalue in store.items():
            oldvalue = previous.get(mark, repo.nullid)
            if newvalue != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, newvalue)
    return orig(store, fp)
135 135
136 136
137 137 # shared repository support
138 138 def _readsharedfeatures(repo):
139 139 """A set of shared features for this repository"""
140 140 try:
141 141 return set(repo.vfs.read(b'shared').splitlines())
142 142 except FileNotFoundError:
143 143 return set()
144 144
145 145
146 146 def _mergeentriesiter(*iterables, **kwargs):
147 147 """Given a set of sorted iterables, yield the next entry in merged order
148 148
149 149 Note that by default entries go from most recent to oldest.
150 150 """
151 151 order = kwargs.pop('order', max)
152 152 iterables = [iter(it) for it in iterables]
153 153 # this tracks still active iterables; iterables are deleted as they are
154 154 # exhausted, which is why this is a dictionary and why each entry also
155 155 # stores the key. Entries are mutable so we can store the next value each
156 156 # time.
157 157 iterable_map = {}
158 158 for key, it in enumerate(iterables):
159 159 try:
160 160 iterable_map[key] = [next(it), key, it]
161 161 except StopIteration:
162 162 # empty entry, can be ignored
163 163 pass
164 164
165 165 while iterable_map:
166 166 value, key, it = order(iterable_map.values())
167 167 yield value
168 168 try:
169 169 iterable_map[key][0] = next(it)
170 170 except StopIteration:
171 171 # this iterable is empty, remove it from consideration
172 172 del iterable_map[key]
173 173
174 174
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        # run the normal share first, then advertise the journal
        # feature in the destination's 'shared' file
        orig(sourcerepo, destrepo, **kwargs)
        with destrepo.vfs(b'shared', b'a') as fp:
            fp.write(b'journal\n')
181 181
182 182
def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (
        repo.path == repopath
        and repo.shared()
        and util.safehasattr(repo, 'journal')
    ):
        sharedrepo = hg.sharedreposource(repo)
        sharedfeatures = _readsharedfeatures(repo)
        # strict superset check: the 'shared' file must list b'journal'
        # plus at least one other feature for there to be entries to pull
        if sharedrepo and sharedfeatures > {b'journal'}:
            # there is a shared repository and there are shared journal entries
            # to copy. move shared data over from source to destination but
            # move the local file first
            if repo.vfs.exists(b'namejournal'):
                journalpath = repo.vfs.join(b'namejournal')
                util.rename(journalpath, journalpath + b'.bak')
            storage = repo.journal
            # read both files oldest-first so the merged result is written
            # back in chronological order
            local = storage._open(
                repo.vfs, filename=b'namejournal.bak', _newestfirst=False
            )
            # only copy shared entries whose namespace is actually shared
            shared = (
                e
                for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures
            )
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)
212 212
213 213
class journalentry(
    collections.namedtuple(
        'journalentry',
        'timestamp user command namespace name oldhashes newhashes',
    )
):
    """A single recorded location change.

    Fields:

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in
      the namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format: fields are
    separated by newlines, hashes are written out in hex separated by
    commas, and the timestamp/timezone pair is separated by a space.
    """

    @classmethod
    def fromstorage(cls, line):
        """Parse one newline-joined storage record into an entry."""
        (
            time,
            user,
            command,
            namespace,
            name,
            oldhashes,
            newhashes,
        ) = line.split(b'\n')
        stamp, tz = time.split()
        return cls(
            (float(stamp), int(tz)),
            user,
            command,
            namespace,
            name,
            tuple(bin(h) for h in oldhashes.split(b',')),
            tuple(bin(h) for h in newhashes.split(b',')),
        )

    def __bytes__(self):
        """bytes representation for storage"""
        fields = (
            b' '.join(map(pycompat.bytestr, self.timestamp)),
            self.user,
            self.command,
            self.namespace,
            self.name,
            b','.join(hex(h) for h in self.oldhashes),
            b','.join(hex(h) for h in self.newhashes),
        )
        return b'\n'.join(fields)

    __str__ = encoding.strmethod(__bytes__)
280 280
281 281
class journalstorage:
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that added when the regular wlock is unlocked (e.g.
    the dirstate).

    """

    # class-level so the command line can be recorded before a local repo
    # (and hence an instance) exists; see recordcommand()
    _currentcommand = ()
    # weak reference to the currently held journal lock, if any; see jlock()
    _lockref = None

    def __init__(self, repo):
        self.user = procutil.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = hg.sharedreposource(repo)
            if sharedrepo is not None and b'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        # shell-quote each argument; multi-line commands are truncated at
        # the first newline so a record always stays a single line
        commandstr = b' '.join(
            map(procutil.shellquote, journalstorage._currentcommand)
        )
        if b'\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition(b'\n')[0] + b' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not.

        (This is copied from the localrepo class)
        """
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        # the journal lock does not nest; holding it twice would deadlock
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_(b'journal lock does not support nesting'))
        desc = _(b'journal of %s') % vfs.base
        try:
            # first attempt without waiting (timeout 0)
            l = lock.lock(vfs, b'namejournal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _(b"waiting for lock on %s held by %r\n") % (desc, inst.locker)
            )
            # default to 600 seconds timeout
            l = lock.lock(
                vfs,
                b'namejournal.lock',
                self.ui.configint(b"ui", b"timeout"),
                desc=desc,
            )
            self.ui.warn(_(b"got lock after %s seconds\n") % l.delay)
        # keep only a weak reference so the lock can be released normally
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            dateutil.makedate(),
            self.user,
            self.command,
            namespace,
            name,
            oldhashes,
            newhashes,
        )

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        # serialize writers via the dedicated journal lock
        with self.jlock(vfs):
            # open file in amend mode to ensure it is created if missing
            with vfs(b'namejournal', mode=b'a+b') as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition(b'\0')[0]
                if version and version != b"%d" % storageversion:
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _(b"unsupported journal file version '%s'\n") % version
                    )
                    return
                if not version:
                    # empty file, write version first
                    f.write((b"%d" % storageversion) + b'\0')
                f.seek(0, os.SEEK_END)
                # append the NUL-terminated serialized entry
                f.write(bytes(entry) + b'\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        # stringmatcher returns (kind, pattern, matcher); keep the matcher
        if namespace is not None:
            namespace = stringutil.stringmatcher(namespace)[-1]
        if name is not None:
            name = stringutil.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e
            for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures
        )
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename=b'namejournal', _newestfirst=True):
        # missing file: yield nothing (this is a generator, so the bare
        # return produces an empty iterator)
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        # records are NUL-separated; the first record is the version number
        lines = raw.split(b'\0')
        version = lines and lines[0]
        if version != b"%d" % storageversion:
            version = version or _(b'not available')
            raise error.Abort(_(b"unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)
492 492
493 493
# journal reading
# log options that don't make sense for journal
_ignoreopts = (b'no-merges', b'graph')


@command(
    b'journal',
    [
        (b'', b'all', None, b'show history for all names'),
        (b'c', b'commits', None, b'show commit metadata'),
    ]
    + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
    b'[OPTION]... [BOOKMARKNAME]',
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy. Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    opts = pycompat.byteskwargs(opts)
    # default to the working copy entry; None means "all names"
    name = b'.'
    if opts.get(b'all'):
        if args:
            raise error.Abort(
                _(b"You can't combine --all and filtering on a name")
            )
        name = None
    if args:
        name = args[0]

    fm = ui.formatter(b'journal', opts)

    def formatnodes(nodes):
        # render a tuple of binary hashes as comma-separated (short) hex
        return fm.formatlist(map(fm.hexfunc, nodes), name=b'node', sep=b',')

    if opts.get(b"template") != b"json":
        if name is None:
            displayname = _(b'the working copy and bookmarks')
        else:
            displayname = b"'%s'" % name
        ui.status(_(b"previous locations of %s:\n") % displayname)

    limit = logcmdutil.getlimit(opts)
    # ``entry`` doubles as a "did we output anything" flag after the loop
    entry = None
    ui.pager(b'journal')
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break

        fm.startitem()
        fm.condwrite(
            ui.verbose, b'oldnodes', b'%s -> ', formatnodes(entry.oldhashes)
        )
        fm.write(b'newnodes', b'%s', formatnodes(entry.newhashes))
        fm.condwrite(ui.verbose, b'user', b' %-8s', entry.user)

        # ``name`` is bytes, or None only if 'all' was an option.
        fm.condwrite(
            # pytype: disable=attribute-error
            opts.get(b'all') or name.startswith(b're:'),
            # pytype: enable=attribute-error
            b'name',
            b' %-8s',
            entry.name,
        )

        fm.condwrite(
            ui.verbose,
            b'date',
            b' %s',
            fm.formatdate(entry.timestamp, b'%Y-%m-%d %H:%M %1%2'),
        )
        fm.write(b'command', b' %s\n', entry.command)

        if opts.get(b"commits"):
            # show full log output for each new location
            if fm.isplain():
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            else:
                displayer = logcmdutil.changesetformatter(
                    ui, repo, fm.nested(b'changesets'), diffopts=opts
                )
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    # hash may have been stripped; report and continue
                    fm.plain(b"%s\n\n" % pycompat.bytestr(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_(b"no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now