##// END OF EJS Templates
py3: handle keyword arguments in hgext/journal.py...
Pulkit Goyal -
r35001:135edf12 default
parent child Browse files
Show More
@@ -1,514 +1,516 b''
1 1 # journal.py
2 2 #
3 3 # Copyright 2014-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """track previous positions of bookmarks (EXPERIMENTAL)
8 8
9 9 This extension adds a new command: `hg journal`, which shows you where
10 10 bookmarks were previously located.
11 11
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import errno
18 18 import os
19 19 import weakref
20 20
21 21 from mercurial.i18n import _
22 22
23 23 from mercurial import (
24 24 bookmarks,
25 25 cmdutil,
26 26 dispatch,
27 27 error,
28 28 extensions,
29 29 hg,
30 30 localrepo,
31 31 lock,
32 32 node,
33 pycompat,
33 34 registrar,
34 35 util,
35 36 )
36 37
37 38 from . import share
38 39
39 40 cmdtable = {}
40 41 command = registrar.command(cmdtable)
41 42
42 43 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
43 44 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
44 45 # be specifying the version(s) of Mercurial they are tested with, or
45 46 # leave the attribute unspecified.
46 47 testedwith = 'ships-with-hg-core'
47 48
48 49 # storage format version; increment when the format changes
49 50 storageversion = 0
50 51
51 52 # namespaces
52 53 bookmarktype = 'bookmark'
53 54 wdirparenttype = 'wdirparent'
54 55 # In a shared repository, what shared feature name is used
55 56 # to indicate this namespace is shared with the source?
56 57 sharednamespaces = {
57 58 bookmarktype: hg.sharedbookmarks,
58 59 }
59 60
# Journal recording, register hooks and storage object
def extsetup(ui):
    """Wrap the entry points whose effects the journal must record"""
    # capture the full command line so entries know what invocation made them
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    # record bookmark moves whenever the bookmark store is written out
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
    # record working-copy parent changes by instrumenting the dirstate
    extensions.wrapfilecache(
        localrepo.localrepository, 'dirstate', wrapdirstate)
    # keep journal data consistent across share/unshare operations
    extensions.wrapfunction(hg, 'postshare', wrappostshare)
    extensions.wrapfunction(hg, 'copystore', unsharejournal)
68 69
def reposetup(ui, repo):
    """Attach a journalstorage to every local repository"""
    if repo.local():
        repo.journal = journalstorage(repo)
        # the journal uses its own dedicated lock (see journalstorage.jlock),
        # so writing 'namejournal' must be allowed without holding the wlock
        repo._wlockfreeprefix.add('namejournal')

        dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
        if cached:
            # already instantiated dirstate isn't yet marked as
            # "journal"-ing, even though repo.dirstate() was already
            # wrapped by own wrapdirstate()
            _setupdirstate(repo, dirstate)
80 81
def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    # stash the complete argument list on the storage class before the
    # command runs, so entries recorded during it carry the invocation
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)
85 86
def _setupdirstate(repo, dirstate):
    """Give the dirstate journal access and register parent-change tracking"""
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback('journal', recorddirstateparents)
89 90
# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    # only repos prepared by reposetup() (i.e. local repos) have a journal
    if util.safehasattr(repo, 'journal'):
        _setupdirstate(repo, dirstate)
    return dirstate
97 98
def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal."""
    oldparents = list(old)
    newparents = list(new)
    if not util.safehasattr(dirstate, 'journalstorage'):
        return
    # only record two hashes if there was a merge; a null second parent
    # means the working copy has a single parent
    if oldparents[1] == node.nullid:
        oldparents = oldparents[:1]
    if newparents[1] == node.nullid:
        newparents = newparents[:1]
    dirstate.journalstorage.record(
        wdirparenttype, '.', oldparents, newparents)
108 109
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if not util.safehasattr(repo, 'journal'):
        return orig(store, fp)
    # compare against a freshly loaded store to find what actually moved
    oldmarks = bookmarks.bmstore(repo)
    for mark, value in store.iteritems():
        oldvalue = oldmarks.get(mark, node.nullid)
        if value != oldvalue:
            repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)
120 121
121 122 # shared repository support
122 123 def _readsharedfeatures(repo):
123 124 """A set of shared features for this repository"""
124 125 try:
125 126 return set(repo.vfs.read('shared').splitlines())
126 127 except IOError as inst:
127 128 if inst.errno != errno.ENOENT:
128 129 raise
129 130 return set()
130 131
131 132 def _mergeentriesiter(*iterables, **kwargs):
132 133 """Given a set of sorted iterables, yield the next entry in merged order
133 134
134 135 Note that by default entries go from most recent to oldest.
135 136 """
136 order = kwargs.pop('order', max)
137 order = kwargs.pop(r'order', max)
137 138 iterables = [iter(it) for it in iterables]
138 139 # this tracks still active iterables; iterables are deleted as they are
139 140 # exhausted, which is why this is a dictionary and why each entry also
140 141 # stores the key. Entries are mutable so we can store the next value each
141 142 # time.
142 143 iterable_map = {}
143 144 for key, it in enumerate(iterables):
144 145 try:
145 146 iterable_map[key] = [next(it), key, it]
146 147 except StopIteration:
147 148 # empty entry, can be ignored
148 149 pass
149 150
150 151 while iterable_map:
151 152 value, key, it = order(iterable_map.itervalues())
152 153 yield value
153 154 try:
154 155 iterable_map[key][0] = next(it)
155 156 except StopIteration:
156 157 # this iterable is empty, remove it from consideration
157 158 del iterable_map[key]
158 159
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        # append to the shared-features file read by _readsharedfeatures()
        with destrepo.vfs('shared', 'a') as fp:
            fp.write('journal\n')
165 166
def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (repo.path == repopath and repo.shared() and
        util.safehasattr(repo, 'journal')):
        sharedrepo = share._getsrcrepo(repo)
        sharedfeatures = _readsharedfeatures(repo)
        if sharedrepo and sharedfeatures > {'journal'}:
            # there is a shared repository and there are shared journal entries
            # to copy. move shared data over from source to destination but
            # move the local file first
            if repo.vfs.exists('namejournal'):
                journalpath = repo.vfs.join('namejournal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            # re-read the just-renamed local entries oldest-first so the
            # merged output below is written back in chronological order
            local = storage._open(
                repo.vfs, filename='namejournal.bak', _newestfirst=False)
            # only copy shared entries whose namespace was actually shared
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)
189 190
class journalentry(collections.namedtuple(
    u'journalentry',
    u'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        """Parse one newline-separated storage record into a journalentry"""
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        # hashes are stored as comma-separated hex; convert back to binary
        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
        newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
        return cls(
            (timestamp, tz), user, command, namespace, name,
            oldhashes, newhashes)

    def __str__(self):
        """String representation for storage"""
        # inverse of fromstorage(): hex-encode hashes, join fields with \n
        time = ' '.join(map(str, self.timestamp))
        oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
        newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))
229 230
class journalstorage(object):
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that added when the regular wlock is unlocked (e.g.
    the dirstate).

    """
    # class-level: the argument list of the currently running hg command,
    # set by recordcommand() (class-level because the repo may not exist yet)
    _currentcommand = ()
    # weakref to the journal lock while it is held, or None
    _lockref = None

    def __init__(self, repo):
        self.user = util.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = share._getsrcrepo(repo)
            if sharedrepo is not None and 'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        """The current hg command line, quoted and truncated to one line"""
        commandstr = ' '.join(
            map(util.shellquote, journalstorage._currentcommand))
        if '\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition('\n')[0] + ' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not.

        (This is copied from the localrepo class)
        """
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        # the journal lock does not nest, unlike the repo wlock
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_('journal lock does not support nesting'))
        desc = _('journal of %s') % vfs.base
        try:
            # first try a non-blocking acquire (timeout 0)
            l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, 'namejournal.lock',
                int(self.ui.config("ui", "timeout")), desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        # normalize single hashes to one-element lists
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            util.makedate(), self.user, self.command, namespace, name,
            oldhashes, newhashes)

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        """Append one entry to the journal file under the journal lock"""
        with self.jlock(vfs):
            version = None
            # open file in amend mode to ensure it is created if missing
            with vfs('namejournal', mode='a+b') as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition('\0')[0]
                if version and version != str(storageversion):
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _("unsupported journal file version '%s'\n") % version)
                    return
                if not version:
                    # empty file, write version first
                    f.write(str(storageversion) + '\0')
                f.seek(0, os.SEEK_END)
                f.write(str(entry) + '\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        # stringmatcher returns (kind, pattern, matcher); keep the matcher
        if namespace is not None:
            namespace = util.stringmatcher(namespace)[-1]
        if name is not None:
            name = util.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename='namejournal', _newestfirst=True):
        """Yield journalentry records parsed from ``filename``, if it exists

        Raises error.Abort if the file's version header is unknown.
        """
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        # records are NUL-separated; the first record is the version header
        lines = raw.split('\0')
        version = lines and lines[0]
        if version != str(storageversion):
            version = version or _('not available')
            raise error.Abort(_("unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)
426 427
# journal reading
# log options that don't make sense for journal
_ignoreopts = ('no-merges', 'graph')
@command(
    'journal', [
        ('', 'all', None, 'show history for all names'),
        ('c', 'commits', None, 'show commit metadata'),
    ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
    '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy.  Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    # on Python 3, **opts keys arrive as str; convert back to bytes
    opts = pycompat.byteskwargs(opts)
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = cmdutil.loglimit(opts)
    # entry stays None when the filtered journal yields nothing (see below)
    entry = None
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        # the name column is only meaningful when several names can appear
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', '  %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', '  %s\n', entry.command)

        if opts.get("commits"):
            displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
            # NOTE(review): `hash` shadows the builtin of the same name
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    fm.write('repolookuperror', "%s\n\n", str(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now