##// END OF EJS Templates
journal: use the dirstate parentchange callbacks...
Mateusz Kwapich -
r29773:f2241c13 default
parent child Browse files
Show More
@@ -1,509 +1,491 b''
1 # journal.py
1 # journal.py
2 #
2 #
3 # Copyright 2014-2016 Facebook, Inc.
3 # Copyright 2014-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """Track previous positions of bookmarks (EXPERIMENTAL)
7 """Track previous positions of bookmarks (EXPERIMENTAL)
8
8
9 This extension adds a new command: `hg journal`, which shows you where
9 This extension adds a new command: `hg journal`, which shows you where
10 bookmarks were previously located.
10 bookmarks were previously located.
11
11
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import errno
17 import errno
18 import os
18 import os
19 import weakref
19 import weakref
20
20
21 from mercurial.i18n import _
21 from mercurial.i18n import _
22
22
23 from mercurial import (
23 from mercurial import (
24 bookmarks,
24 bookmarks,
25 cmdutil,
25 cmdutil,
26 commands,
26 commands,
27 dirstate,
28 dispatch,
27 dispatch,
29 error,
28 error,
30 extensions,
29 extensions,
31 hg,
30 hg,
32 localrepo,
31 localrepo,
33 lock,
32 lock,
34 node,
33 node,
35 util,
34 util,
36 )
35 )
37
36
38 from . import share
37 from . import share
39
38
40 cmdtable = {}
39 cmdtable = {}
41 command = cmdutil.command(cmdtable)
40 command = cmdutil.command(cmdtable)
42
41
43 # Note for extension authors: ONLY specify testedwith = 'internal' for
42 # Note for extension authors: ONLY specify testedwith = 'internal' for
44 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
45 # be specifying the version(s) of Mercurial they are tested with, or
44 # be specifying the version(s) of Mercurial they are tested with, or
46 # leave the attribute unspecified.
45 # leave the attribute unspecified.
47 testedwith = 'internal'
46 testedwith = 'internal'
48
47
49 # storage format version; increment when the format changes
48 # storage format version; increment when the format changes
50 storageversion = 0
49 storageversion = 0
51
50
52 # namespaces
51 # namespaces
53 bookmarktype = 'bookmark'
52 bookmarktype = 'bookmark'
54 wdirparenttype = 'wdirparent'
53 wdirparenttype = 'wdirparent'
55 # In a shared repository, what shared feature name is used
54 # In a shared repository, what shared feature name is used
56 # to indicate this namespace is shared with the source?
55 # to indicate this namespace is shared with the source?
57 sharednamespaces = {
56 sharednamespaces = {
58 bookmarktype: hg.sharedbookmarks,
57 bookmarktype: hg.sharedbookmarks,
59 }
58 }
60
59
61 # Journal recording, register hooks and storage object
60 # Journal recording, register hooks and storage object
def extsetup(ui):
    """Install the wrappers that record journal entries.

    Runs once at extension load time. Wraps:
    - dispatch.runcommand, to capture the hg command line being run
    - bookmarks.bmstore._write, to record bookmark moves
    - the localrepo dirstate property, so the dirstate gets access to the
      journal storage (see wrapdirstate)
    - hg.postshare / hg.copystore, to propagate journal sharing when
      repositories are shared or unshared
    """
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
    extensions.wrapfunction(
        localrepo.localrepository.dirstate, 'func', wrapdirstate)
    extensions.wrapfunction(hg, 'postshare', wrappostshare)
    extensions.wrapfunction(hg, 'copystore', unsharejournal)
71
68
def reposetup(ui, repo):
    """Attach a journalstorage instance to every local repository."""
    if repo.local():
        repo.journal = journalstorage(repo)
75
72
def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    # Stash the full argument list on the storage class before dispatching,
    # so entries recorded during the command know what triggered them.
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)
80
77
# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    if util.safehasattr(repo, 'journal'):
        dirstate.journalstorage = repo.journal
        # record working-directory parent changes via the dirstate's own
        # parent-change callback mechanism
        dirstate.addparentchangecallback('journal', recorddirstateparents)
    return dirstate
88
86
def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal.

    Invoked by the dirstate as a parent-change callback (registered in
    wrapdirstate). ``old`` and ``new`` are the (p1, p2) parent pairs before
    and after the change; they are copied to lists so indexing below is safe
    regardless of the iterable type passed in.
    """
    old = list(old)
    new = list(new)
    if util.safehasattr(dirstate, 'journalstorage'):
        # only record two hashes if there was a merge
        oldhashes = old[:1] if old[1] == node.nullid else old
        newhashes = new[:1] if new[1] == node.nullid else new
        dirstate.journalstorage.record(
            wdirparenttype, '.', oldhashes, newhashes)
115
97
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        # reload the bookmark store from disk to get the pre-write values,
        # then diff against the in-memory store being written
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.iteritems():
            oldvalue = oldmarks.get(mark, node.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)
127
109
# shared repository support
def _readsharedfeatures(repo):
    """A set of shared features for this repository

    Reads the ``.hg/shared`` file (one feature name per line). A missing
    file simply means nothing is shared; any other I/O error propagates.
    """
    try:
        return set(repo.vfs.read('shared').splitlines())
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return set()
137
119
def _mergeentriesiter(*iterables, **kwargs):
    """Given a set of sorted iterables, yield the next entry in merged order

    Note that by default entries go from most recent to oldest.
    """
    order = kwargs.pop('order', max)
    iterables = [iter(it) for it in iterables]
    # this tracks still active iterables; iterables are deleted as they are
    # exhausted, which is why this is a dictionary and why each entry also
    # stores the key. Entries are mutable so we can store the next value each
    # time.
    iterable_map = {}
    for key, it in enumerate(iterables):
        try:
            iterable_map[key] = [next(it), key, it]
        except StopIteration:
            # empty entry, can be ignored
            pass

    while iterable_map:
        # .values() (not py2-only .itervalues()) keeps this working on
        # Python 3; on py2 it merely builds a small temporary list
        value, key, it = order(iterable_map.values())
        yield value
        try:
            iterable_map[key][0] = next(it)
        except StopIteration:
            # this iterable is empty, remove it from consideration
            del iterable_map[key]
165
147
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        # append 'journal' to the shared-features file so later lookups
        # (_readsharedfeatures) know the journal lives in the source repo
        with destrepo.vfs('shared', 'a') as fp:
            fp.write('journal\n')
172
154
def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (repo.path == repopath and repo.shared() and
            util.safehasattr(repo, 'journal')):
        sharedrepo = share._getsrcrepo(repo)
        sharedfeatures = _readsharedfeatures(repo)
        if sharedrepo and sharedfeatures > set(['journal']):
            # there is a shared repository and there are shared journal entries
            # to copy. move shared date over from source to destination but
            # move the local file first
            if repo.vfs.exists('journal'):
                journalpath = repo.join('journal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            local = storage._open(
                repo.vfs, filename='journal.bak', _newestfirst=False)
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            # merge oldest-first so the rewritten journal stays in
            # chronological on-disk order
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)
196
178
class journalentry(collections.namedtuple(
        'journalentry',
        'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        """Parse one serialised entry (see class docstring for the format)."""
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
        newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
        return cls(
            (timestamp, tz), user, command, namespace, name,
            oldhashes, newhashes)

    def __str__(self):
        """String representation for storage"""
        time = ' '.join(map(str, self.timestamp))
        oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
        newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))
236
218
class journalstorage(object):
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that added when the regular wlock is unlocked (e.g.
    the dirstate).

    """
    # class-level: the hg command line currently running (see recordcommand)
    _currentcommand = ()
    # weakref to the currently-held journal lock, to detect nesting
    _lockref = None

    def __init__(self, repo):
        self.user = util.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = share._getsrcrepo(repo)
            if sharedrepo is not None and 'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        commandstr = ' '.join(
            map(util.shellquote, journalstorage._currentcommand))
        if '\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition('\n')[0] + ' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        if self._lockref and self._lockref():
            raise error.Abort(_('journal lock does not support nesting'))
        desc = _('journal of %s') % vfs.base
        try:
            # first attempt is non-blocking (timeout 0)
            l = lock.lock(vfs, 'journal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, 'journal.lock',
                int(self.ui.config("ui", "timeout", "600")), desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            util.makedate(), self.user, self.command, namespace, name,
            oldhashes, newhashes)

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        """Append one journalentry to the journal file on ``vfs``."""
        with self.jlock(vfs):
            version = None
            # open file in amend mode to ensure it is created if missing
            with vfs('journal', mode='a+b', atomictemp=True) as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition('\0')[0]
                if version and version != str(storageversion):
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _("unsupported journal file version '%s'\n") % version)
                    return
                if not version:
                    # empty file, write version first
                    f.write(str(storageversion) + '\0')
                f.seek(0, os.SEEK_END)
                f.write(str(entry) + '\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        if namespace is not None:
            namespace = util.stringmatcher(namespace)[-1]
        if name is not None:
            name = util.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename='journal', _newestfirst=True):
        """Yield journalentry objects parsed from ``filename`` on ``vfs``.

        Raises error.Abort on an unknown storage version.
        """
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        lines = raw.split('\0')
        version = lines and lines[0]
        if version != str(storageversion):
            version = version or _('not available')
            raise error.Abort(_("unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)
421
403
# journal reading
# log options that don't make sense for journal
_ignoreopts = ('no-merges', 'graph')
@command(
    'journal', [
        ('', 'all', None, 'show history for all names'),
        ('c', 'commits', None, 'show commit metadata'),
    ] + [opt for opt in commands.logopts if opt[1] not in _ignoreopts],
    '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy.  Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    # filter on '.' (working copy) unless --all or an explicit name is given
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = cmdutil.loglimit(opts)
    # entry stays None if the journal produced nothing; checked after the loop
    entry = None
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    # hash may have been stripped; report instead of aborting
                    fm.write('repolookuperror', "%s\n\n", str(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now