##// END OF EJS Templates
py3: replace __str__ with __bytes__ in hgext/journal.py...
Pulkit Goyal -
r36684:d79d68bb default
parent child Browse files
Show More
@@ -1,517 +1,520 b''
1 1 # journal.py
2 2 #
3 3 # Copyright 2014-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """track previous positions of bookmarks (EXPERIMENTAL)
8 8
9 9 This extension adds a new command: `hg journal`, which shows you where
10 10 bookmarks were previously located.
11 11
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import errno
18 18 import os
19 19 import weakref
20 20
21 21 from mercurial.i18n import _
22 22
23 23 from mercurial import (
24 24 bookmarks,
25 25 cmdutil,
26 26 dispatch,
27 encoding,
27 28 error,
28 29 extensions,
29 30 hg,
30 31 localrepo,
31 32 lock,
32 33 logcmdutil,
33 34 node,
34 35 pycompat,
35 36 registrar,
36 37 util,
37 38 )
38 39 from mercurial.utils import dateutil
39 40
# command table, populated by the @command decorator below
cmdtable = {}
command = registrar.command(cmdtable)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# storage format version; increment when the format changes
storageversion = 0

# namespaces
# each journal entry is tagged with a namespace describing what kind of
# item changed: a bookmark, or the working directory parent(s)
bookmarktype = 'bookmark'
wdirparenttype = 'wdirparent'
# In a shared repository, what shared feature name is used
# to indicate this namespace is shared with the source?
sharednamespaces = {
    bookmarktype: hg.sharedbookmarks,
}
60 61
# Journal recording, register hooks and storage object
def extsetup(ui):
    """Wrap dispatch and storage entry points so changes get journalled

    Hooks command dispatch (to capture the command line), bookmark writes,
    dirstate creation, and share/unshare operations.
    """
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
    extensions.wrapfilecache(
        localrepo.localrepository, 'dirstate', wrapdirstate)
    extensions.wrapfunction(hg, 'postshare', wrappostshare)
    extensions.wrapfunction(hg, 'copystore', unsharejournal)
def reposetup(ui, repo):
    """Attach journal storage to every local repository"""
    # remote/peer repositories have nothing to journal
    if not repo.local():
        return

    repo.journal = journalstorage(repo)
    # the journal file uses its own dedicated lock, not the wlock
    repo._wlockfreeprefix.add('namejournal')

    dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
    if not cached:
        return
    # an already instantiated dirstate was wrapped by wrapdirstate() but
    # isn't yet marked as "journal"-ing; hook it up now
    _setupdirstate(repo, dirstate)
81 82
def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    # stash the full invocation on the class so journal entries written
    # anywhere during this command can reference it
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)
86 87
def _setupdirstate(repo, dirstate):
    # give the dirstate access to journal storage and have it report
    # working-directory parent changes through the callback below
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback('journal', recorddirstateparents)
90 91
# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    # only repositories that went through reposetup() have a journal
    if util.safehasattr(repo, 'journal'):
        _setupdirstate(repo, dirstate)
    return dirstate
98 99
def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal."""
    old = list(old)
    new = list(new)
    if util.safehasattr(dirstate, 'journalstorage'):
        # only record two hashes if there was a merge
        oldhashes = old[:1] if old[1] == node.nullid else old
        newhashes = new[:1] if new[1] == node.nullid else new
        # '.' is the conventional name for the working copy
        dirstate.journalstorage.record(
            wdirparenttype, '.', oldhashes, newhashes)
109 110
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        # re-read the marks currently on disk to diff against the
        # about-to-be-written store
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.iteritems():
            oldvalue = oldmarks.get(mark, node.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)
121 122
# shared repository support
def _readsharedfeatures(repo):
    """A set of shared features for this repository"""
    try:
        data = repo.vfs.read('shared')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        # no 'shared' file at all means nothing is shared
        return set()
    return set(data.splitlines())
131 132
def _mergeentriesiter(*iterables, **kwargs):
    """Given a set of sorted iterables, yield the next entry in merged order

    Note that by default entries go from most recent to oldest.

    Accepts an optional ``order`` keyword (default ``max``) used to select
    which head value is yielded next; pass ``min`` for oldest-first merging.
    """
    order = kwargs.pop(r'order', max)
    iterables = [iter(it) for it in iterables]
    # this tracks still active iterables; iterables are deleted as they are
    # exhausted, which is why this is a dictionary and why each entry also
    # stores the key. Entries are mutable so we can store the next value each
    # time.
    iterable_map = {}
    for key, it in enumerate(iterables):
        try:
            iterable_map[key] = [next(it), key, it]
        except StopIteration:
            # empty entry, can be ignored
            pass

    while iterable_map:
        # values() rather than py2-only itervalues() so this also works on
        # py3; the py2 list copy is negligible for the handful of iterables
        # merged here
        value, key, it = order(iterable_map.values())
        yield value
        try:
            iterable_map[key][0] = next(it)
        except StopIteration:
            # this iterable is empty, remove it from consideration
            del iterable_map[key]
159 160
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        # append 'journal' to the shared-features file so reads and writes
        # are redirected to the share source (see _readsharedfeatures)
        with destrepo.vfs('shared', 'a') as fp:
            fp.write('journal\n')
166 167
def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (repo.path == repopath and repo.shared() and
            util.safehasattr(repo, 'journal')):
        sharedrepo = hg.sharedreposource(repo)
        sharedfeatures = _readsharedfeatures(repo)
        # NOTE(review): '>' is a strict-superset test, so this requires at
        # least one shared feature besides 'journal' — confirm intended
        if sharedrepo and sharedfeatures > {'journal'}:
            # there is a shared repository and there are shared journal entries
            # to copy. move shared date over from source to destination but
            # move the local file first
            if repo.vfs.exists('namejournal'):
                journalpath = repo.vfs.join('namejournal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            # oldest-first (order=min) so merged entries are appended to the
            # new local journal in chronological order
            local = storage._open(
                repo.vfs, filename='namejournal.bak', _newestfirst=False)
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)
190 191
class journalentry(collections.namedtuple(
        u'journalentry',
        u'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        """Parse one newline-separated storage record into a journalentry"""
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        # 'hexhash' rather than 'hash' to avoid shadowing the builtin
        oldhashes = tuple(node.bin(hexhash)
                          for hexhash in oldhashes.split(','))
        newhashes = tuple(node.bin(hexhash)
                          for hexhash in newhashes.split(','))
        return cls(
            (timestamp, tz), user, command, namespace, name,
            oldhashes, newhashes)

    def __bytes__(self):
        """bytes representation for storage"""
        # pycompat.bytestr (str on py2) keeps the joined result bytes on py3
        time = ' '.join(map(pycompat.bytestr, self.timestamp))
        oldhashes = ','.join([node.hex(binhash) for binhash in self.oldhashes])
        newhashes = ','.join([node.hex(binhash) for binhash in self.newhashes])
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))

    __str__ = encoding.strmethod(__bytes__)
233
class journalstorage(object):
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that added when the regular wlock is unlocked (e.g.
    the dirstate).

    """
    # command line shared across instances; set by recordcommand()
    _currentcommand = ()
    # weakref to the currently held journal lock, if any
    _lockref = None

    def __init__(self, repo):
        self.user = util.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = hg.sharedreposource(repo)
            if sharedrepo is not None and 'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        """The current command line, shell-quoted and single-line"""
        commandstr = ' '.join(
            map(util.shellquote, journalstorage._currentcommand))
        if '\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition('\n')[0] + ' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not.

        (This is copied from the localrepo class)
        """
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        # journal locks do not nest; a second jlock while one is held
        # indicates a programming error
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_('journal lock does not support nesting'))
        desc = _('journal of %s') % vfs.base
        try:
            # first try without waiting (timeout 0)
            l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, 'namejournal.lock',
                self.ui.configint("ui", "timeout"), desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            dateutil.makedate(), self.user, self.command, namespace, name,
            oldhashes, newhashes)

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        """Append a journalentry to the journal file under vfs"""
        with self.jlock(vfs):
            version = None
            # open file in amend mode to ensure it is created if missing
            with vfs('namejournal', mode='a+b') as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition('\0')[0]
                # '%d' % keeps the comparison in bytes on py3, where
                # str(storageversion) would be unicode
                if version and version != '%d' % storageversion:
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _("unsupported journal file version '%s'\n") % version)
                    return
                if not version:
                    # empty file, write version first
                    f.write('%d' % storageversion + '\0')
                f.seek(0, os.SEEK_END)
                # bytes() (not str()) so py3 invokes journalentry.__bytes__
                f.write(bytes(entry) + '\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        if namespace is not None:
            namespace = util.stringmatcher(namespace)[-1]
        if name is not None:
            name = util.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename='namejournal', _newestfirst=True):
        """Yield journalentry records from filename, newest first by default

        Raises error.Abort when the file exists but has an unknown version.
        """
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        lines = raw.split('\0')
        version = lines and lines[0]
        # '%d' % keeps the comparison bytes-vs-bytes on py3
        if version != '%d' % storageversion:
            version = version or _('not available')
            raise error.Abort(_("unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)
427 430
# journal reading
# log options that don't make sense for journal
_ignoreopts = ('no-merges', 'graph')
@command(
    'journal', [
        ('', 'all', None, 'show history for all names'),
        ('c', 'commits', None, 'show commit metadata'),
    ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
    '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy. Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    opts = pycompat.byteskwargs(opts)
    # '.' is the working copy; None means "all names"
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = logcmdutil.getlimit(opts)
    # 'entry' doubles as the "did we output anything" flag after the loop
    entry = None
    ui.pager('journal')
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            # 'commithash' rather than 'hash' to avoid shadowing the builtin
            for commithash in entry.newhashes:
                try:
                    ctx = repo[commithash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    fm.write('repolookuperror', "%s\n\n", pycompat.bytestr(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now