##// END OF EJS Templates
journal: execute setup procedures for already instantiated dirstate...
FUJIWARA Katsunori -
r33383:774beab9 default
parent child Browse files
Show More
@@ -1,503 +1,513 b''
1 1 # journal.py
2 2 #
3 3 # Copyright 2014-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """track previous positions of bookmarks (EXPERIMENTAL)
8 8
9 9 This extension adds a new command: `hg journal`, which shows you where
10 10 bookmarks were previously located.
11 11
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import errno
18 18 import os
19 19 import weakref
20 20
21 21 from mercurial.i18n import _
22 22
23 23 from mercurial import (
24 24 bookmarks,
25 25 cmdutil,
26 26 dispatch,
27 27 error,
28 28 extensions,
29 29 hg,
30 30 localrepo,
31 31 lock,
32 32 node,
33 33 registrar,
34 34 util,
35 35 )
36 36
37 37 from . import share
38 38
39 39 cmdtable = {}
40 40 command = registrar.command(cmdtable)
41 41
42 42 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
43 43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
44 44 # be specifying the version(s) of Mercurial they are tested with, or
45 45 # leave the attribute unspecified.
46 46 testedwith = 'ships-with-hg-core'
47 47
48 48 # storage format version; increment when the format changes
49 49 storageversion = 0
50 50
51 51 # namespaces
52 52 bookmarktype = 'bookmark'
53 53 wdirparenttype = 'wdirparent'
54 54 # In a shared repository, what shared feature name is used
55 55 # to indicate this namespace is shared with the source?
56 56 sharednamespaces = {
57 57 bookmarktype: hg.sharedbookmarks,
58 58 }
59 59
# Journal recording, register hooks and storage object
def extsetup(ui):
    """Register the wrappers that feed journal recording.

    Called once at extension load time.
    """
    # capture the full command line of every command for journal entries
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    # record bookmark moves whenever the bookmark store is written out
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
    # hook newly instantiated dirstates up to journal recording; 'func' is
    # the underlying factory of the localrepository.dirstate filecache
    extensions.wrapfunction(
        localrepo.localrepository.dirstate, 'func', wrapdirstate)
    # propagate/copy journal data when repositories are shared or unshared
    extensions.wrapfunction(hg, 'postshare', wrappostshare)
    extensions.wrapfunction(hg, 'copystore', unsharejournal)
68 68
def reposetup(ui, repo):
    """Attach journal storage to every local repository."""
    if repo.local():
        repo.journal = journalstorage(repo)

        dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
        if cached:
            # already instantiated dirstate isn't yet marked as
            # "journal"-ing, even though repo.dirstate() was already
            # wrapped by own wrapdirstate()
            _setupdirstate(repo, dirstate)
79
def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    # remember the complete argument list before dispatching, so every
    # journal entry written during this command can reference it
    journalstorage.recordcommand(*fullargs)
    result = orig(lui, repo, cmd, fullargs, *args)
    return result
77 84
def _setupdirstate(repo, dirstate):
    # expose the repo's journal storage on the dirstate and register a
    # callback so working-directory parent changes are recorded
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback('journal', recorddirstateparents)
88
# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    if util.safehasattr(repo, 'journal'):
        # only hook up recording when reposetup() attached a journal to
        # this repo (i.e. it is a local repository)
        _setupdirstate(repo, dirstate)
    return dirstate
86 96
def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal.

    ``old`` and ``new`` are (p1, p2) pairs of binary nodes.
    """
    old = list(old)
    new = list(new)
    if util.safehasattr(dirstate, 'journalstorage'):
        # only record two hashes if there was a merge
        oldhashes = old[:1] if old[1] == node.nullid else old
        newhashes = new[:1] if new[1] == node.nullid else new
        # the working copy parent is always journaled under the name '.'
        dirstate.journalstorage.record(
            wdirparenttype, '.', oldhashes, newhashes)
97 107
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        # compare the about-to-be-written bookmarks against the ones
        # still on disk to find out which ones actually moved
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.iteritems():
            oldvalue = oldmarks.get(mark, node.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)
109 119
110 120 # shared repository support
111 121 def _readsharedfeatures(repo):
112 122 """A set of shared features for this repository"""
113 123 try:
114 124 return set(repo.vfs.read('shared').splitlines())
115 125 except IOError as inst:
116 126 if inst.errno != errno.ENOENT:
117 127 raise
118 128 return set()
119 129
120 130 def _mergeentriesiter(*iterables, **kwargs):
121 131 """Given a set of sorted iterables, yield the next entry in merged order
122 132
123 133 Note that by default entries go from most recent to oldest.
124 134 """
125 135 order = kwargs.pop('order', max)
126 136 iterables = [iter(it) for it in iterables]
127 137 # this tracks still active iterables; iterables are deleted as they are
128 138 # exhausted, which is why this is a dictionary and why each entry also
129 139 # stores the key. Entries are mutable so we can store the next value each
130 140 # time.
131 141 iterable_map = {}
132 142 for key, it in enumerate(iterables):
133 143 try:
134 144 iterable_map[key] = [next(it), key, it]
135 145 except StopIteration:
136 146 # empty entry, can be ignored
137 147 pass
138 148
139 149 while iterable_map:
140 150 value, key, it = order(iterable_map.itervalues())
141 151 yield value
142 152 try:
143 153 iterable_map[key][0] = next(it)
144 154 except StopIteration:
145 155 # this iterable is empty, remove it from consideration
146 156 del iterable_map[key]
147 157
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        # append to the destination's 'shared' file so later calls to
        # _readsharedfeatures() see that the journal is shared
        with destrepo.vfs('shared', 'a') as fp:
            fp.write('journal\n')
154 164
def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (repo.path == repopath and repo.shared() and
            util.safehasattr(repo, 'journal')):
        sharedrepo = share._getsrcrepo(repo)
        sharedfeatures = _readsharedfeatures(repo)
        # strict superset check: there must be at least one feature shared
        # besides 'journal' itself... NOTE(review): `> {'journal'}` excludes
        # the exact-{'journal'} case; confirm that is intended
        if sharedrepo and sharedfeatures > {'journal'}:
            # there is a shared repository and there are shared journal
            # entries to copy. Move shared data over from source to
            # destination, but move the local file first
            if repo.vfs.exists('namejournal'):
                journalpath = repo.vfs.join('namejournal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            local = storage._open(
                repo.vfs, filename='namejournal.bak', _newestfirst=False)
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            # merge oldest-first (order=min) so the rewritten file keeps
            # chronological write order
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)
178 188
class journalentry(collections.namedtuple(
    u'journalentry',
    u'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        """Parse one storage-format record back into a journalentry."""
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        # hashes are stored hex-encoded, comma-separated
        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
        newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
        return cls(
            (timestamp, tz), user, command, namespace, name,
            oldhashes, newhashes)

    def __str__(self):
        """String representation for storage"""
        # inverse of fromstorage(): newline-separated fields, hex hashes
        time = ' '.join(map(str, self.timestamp))
        oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
        newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))
218 228
class journalstorage(object):
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that added when the regular wlock is unlocked (e.g.
    the dirstate).

    """
    # class-level so a command started against a non-local repo (e.g. a
    # clone) is still recorded; see recordcommand()
    _currentcommand = ()
    _lockref = None

    def __init__(self, repo):
        """Bind storage to *repo*, detecting share-extension setups."""
        self.user = util.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = share._getsrcrepo(repo)
            if sharedrepo is not None and 'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        """The current hg command line, shell-quoted, single line."""
        commandstr = ' '.join(
            map(util.shellquote, journalstorage._currentcommand))
        if '\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition('\n')[0] + ' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not.

        (This is copied from the localrepo class)
        """
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        # this lock does not nest; holding it twice is a programming error
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_('journal lock does not support nesting'))
        desc = _('journal of %s') % vfs.base
        try:
            # first try a non-blocking acquire (timeout 0)
            l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, 'namejournal.lock',
                int(self.ui.config("ui", "timeout", "600")), desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            util.makedate(), self.user, self.command, namespace, name,
            oldhashes, newhashes)

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        """Append *entry* to the journal file under *vfs*, under jlock."""
        with self.jlock(vfs):
            version = None
            # open file in amend mode to ensure it is created if missing
            with vfs('namejournal', mode='a+b', atomictemp=True) as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition('\0')[0]
                if version and version != str(storageversion):
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _("unsupported journal file version '%s'\n") % version)
                    return
                if not version:
                    # empty file, write version first
                    f.write(str(storageversion) + '\0')
                f.seek(0, os.SEEK_END)
                f.write(str(entry) + '\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        # stringmatcher returns (kind, pattern, matcher); keep the matcher
        if namespace is not None:
            namespace = util.stringmatcher(namespace)[-1]
        if name is not None:
            name = util.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename='namejournal', _newestfirst=True):
        """Yield journalentry records from *filename*, newest first.

        Returns without yielding anything when the file does not exist;
        aborts on an unknown storage version.
        """
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        lines = raw.split('\0')
        version = lines and lines[0]
        if version != str(storageversion):
            version = version or _('not available')
            raise error.Abort(_("unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)
415 425
# journal reading
# log options that don't make sense for journal
_ignoreopts = ('no-merges', 'graph')
@command(
    'journal', [
        ('', 'all', None, 'show history for all names'),
        ('c', 'commits', None, 'show commit metadata'),
    ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
    '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy. Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    # default to the working copy ('.'); --all means no name filter at all
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = cmdutil.loglimit(opts)
    # 'entry' doubles as a did-we-print-anything flag after the loop
    entry = None
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        # show the entry name when listing everything or matching a pattern
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    # the commit may have been stripped; report and continue
                    fm.write('repolookuperror', "%s\n\n", str(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now