##// END OF EJS Templates
journal: execute setup procedures for already instantiated dirstate...
FUJIWARA Katsunori -
r33383:774beab9 default
parent child Browse files
Show More
@@ -1,503 +1,513 b''
1 # journal.py
1 # journal.py
2 #
2 #
3 # Copyright 2014-2016 Facebook, Inc.
3 # Copyright 2014-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """track previous positions of bookmarks (EXPERIMENTAL)
7 """track previous positions of bookmarks (EXPERIMENTAL)
8
8
9 This extension adds a new command: `hg journal`, which shows you where
9 This extension adds a new command: `hg journal`, which shows you where
10 bookmarks were previously located.
10 bookmarks were previously located.
11
11
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import errno
17 import errno
18 import os
18 import os
19 import weakref
19 import weakref
20
20
21 from mercurial.i18n import _
21 from mercurial.i18n import _
22
22
23 from mercurial import (
23 from mercurial import (
24 bookmarks,
24 bookmarks,
25 cmdutil,
25 cmdutil,
26 dispatch,
26 dispatch,
27 error,
27 error,
28 extensions,
28 extensions,
29 hg,
29 hg,
30 localrepo,
30 localrepo,
31 lock,
31 lock,
32 node,
32 node,
33 registrar,
33 registrar,
34 util,
34 util,
35 )
35 )
36
36
37 from . import share
37 from . import share
38
38
# command table populated by the @command decorator below
cmdtable = {}
command = registrar.command(cmdtable)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# storage format version; increment when the format changes
storageversion = 0

# namespaces: each journal entry is tagged with one of these opaque strings
bookmarktype = 'bookmark'
wdirparenttype = 'wdirparent'
# In a shared repository, what shared feature name is used
# to indicate this namespace is shared with the source?
sharednamespaces = {
    bookmarktype: hg.sharedbookmarks,
}
59
59
# Journal recording, register hooks and storage object
def extsetup(ui):
    """Install the function wrappers that record journal entries.

    Hooks command dispatch (to capture the command line), bookmark
    writes, dirstate creation, and the share extension's post-share /
    copystore steps.
    """
    _wrappers = [
        (dispatch, 'runcommand', runcommand),
        (bookmarks.bmstore, '_write', recordbookmarks),
        # 'func' is the underlying function of the dirstate filecache
        (localrepo.localrepository.dirstate, 'func', wrapdirstate),
        (hg, 'postshare', wrappostshare),
        (hg, 'copystore', unsharejournal),
    ]
    for origin, funcname, wrapper in _wrappers:
        extensions.wrapfunction(origin, funcname, wrapper)
68
68
def reposetup(ui, repo):
    """Attach a journalstorage to every local repository."""
    if not repo.local():
        return
    repo.journal = journalstorage(repo)

    dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
    if cached:
        # already instantiated dirstate isn't yet marked as
        # "journal"-ing, even though repo.dirstate() was already
        # wrapped by own wrapdirstate()
        _setupdirstate(repo, dirstate)
79
def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    # stash the full command line on the journalstorage class so that
    # entries recorded later in this process can reference it
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)
77
84
def _setupdirstate(repo, dirstate):
    # make the journal storage reachable from the dirstate, and make sure
    # parent changes get recorded through the 'journal' callback
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback('journal', recorddirstateparents)
88
# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    if not util.safehasattr(repo, 'journal'):
        # no journal storage on this repo (e.g. reposetup never ran);
        # hand the dirstate back untouched
        return dirstate
    _setupdirstate(repo, dirstate)
    return dirstate
86
96
def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal."""
    if not util.safehasattr(dirstate, 'journalstorage'):
        return

    def _collapse(parents):
        # only keep both hashes when the second parent is set (a merge)
        parents = list(parents)
        if parents[1] == node.nullid:
            return parents[:1]
        return parents

    dirstate.journalstorage.record(
        wdirparenttype, '.', _collapse(old), _collapse(new))
97
107
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        # re-read the on-disk bookmarks to compare against the in-memory ones
        previous = bookmarks.bmstore(repo)
        for mark, newvalue in store.iteritems():
            oldvalue = previous.get(mark, node.nullid)
            if newvalue != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, newvalue)
    return orig(store, fp)
109
119
110 # shared repository support
120 # shared repository support
111 def _readsharedfeatures(repo):
121 def _readsharedfeatures(repo):
112 """A set of shared features for this repository"""
122 """A set of shared features for this repository"""
113 try:
123 try:
114 return set(repo.vfs.read('shared').splitlines())
124 return set(repo.vfs.read('shared').splitlines())
115 except IOError as inst:
125 except IOError as inst:
116 if inst.errno != errno.ENOENT:
126 if inst.errno != errno.ENOENT:
117 raise
127 raise
118 return set()
128 return set()
119
129
120 def _mergeentriesiter(*iterables, **kwargs):
130 def _mergeentriesiter(*iterables, **kwargs):
121 """Given a set of sorted iterables, yield the next entry in merged order
131 """Given a set of sorted iterables, yield the next entry in merged order
122
132
123 Note that by default entries go from most recent to oldest.
133 Note that by default entries go from most recent to oldest.
124 """
134 """
125 order = kwargs.pop('order', max)
135 order = kwargs.pop('order', max)
126 iterables = [iter(it) for it in iterables]
136 iterables = [iter(it) for it in iterables]
127 # this tracks still active iterables; iterables are deleted as they are
137 # this tracks still active iterables; iterables are deleted as they are
128 # exhausted, which is why this is a dictionary and why each entry also
138 # exhausted, which is why this is a dictionary and why each entry also
129 # stores the key. Entries are mutable so we can store the next value each
139 # stores the key. Entries are mutable so we can store the next value each
130 # time.
140 # time.
131 iterable_map = {}
141 iterable_map = {}
132 for key, it in enumerate(iterables):
142 for key, it in enumerate(iterables):
133 try:
143 try:
134 iterable_map[key] = [next(it), key, it]
144 iterable_map[key] = [next(it), key, it]
135 except StopIteration:
145 except StopIteration:
136 # empty entry, can be ignored
146 # empty entry, can be ignored
137 pass
147 pass
138
148
139 while iterable_map:
149 while iterable_map:
140 value, key, it = order(iterable_map.itervalues())
150 value, key, it = order(iterable_map.itervalues())
141 yield value
151 yield value
142 try:
152 try:
143 iterable_map[key][0] = next(it)
153 iterable_map[key][0] = next(it)
144 except StopIteration:
154 except StopIteration:
145 # this iterable is empty, remove it from consideration
155 # this iterable is empty, remove it from consideration
146 del iterable_map[key]
156 del iterable_map[key]
147
157
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        # append (not overwrite) so other shared features are preserved
        with destrepo.vfs('shared', 'a') as fp:
            fp.write('journal\n')
154
164
def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (repo.path == repopath and repo.shared() and
            util.safehasattr(repo, 'journal')):
        sharedrepo = share._getsrcrepo(repo)
        sharedfeatures = _readsharedfeatures(repo)
        # strict superset: 'journal' plus at least one other shared feature
        if sharedrepo and sharedfeatures > {'journal'}:
            # there is a shared repository and there are shared journal entries
            # to copy. move shared data over from source to destination but
            # move the local file first
            if repo.vfs.exists('namejournal'):
                journalpath = repo.vfs.join('namejournal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            # oldest-first (_newestfirst=False) so _write appends in
            # chronological order
            local = storage._open(
                repo.vfs, filename='namejournal.bak', _newestfirst=False)
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)
178
188
class journalentry(collections.namedtuple(
        u'journalentry',
        u'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        """Parse one serialised record (the inverse of __str__)."""
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        # 'hexhash' rather than 'hash' so the builtin isn't shadowed
        oldhashes = tuple(node.bin(hexhash)
                          for hexhash in oldhashes.split(','))
        newhashes = tuple(node.bin(hexhash)
                          for hexhash in newhashes.split(','))
        return cls(
            (timestamp, tz), user, command, namespace, name,
            oldhashes, newhashes)

    def __str__(self):
        """String representation for storage"""
        time = ' '.join(map(str, self.timestamp))
        oldhashes = ','.join([node.hex(binhash) for binhash in self.oldhashes])
        newhashes = ','.join([node.hex(binhash) for binhash in self.newhashes])
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))
218
228
class journalstorage(object):
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that added when the regular wlock is unlocked (e.g.
    the dirstate).

    """
    # class-level: the command line is recorded before a local repo (and
    # thus an instance) may exist; see recordcommand()
    _currentcommand = ()
    # weakref to the currently-held journal lock, if any
    _lockref = None

    def __init__(self, repo):
        self.user = util.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = share._getsrcrepo(repo)
            if sharedrepo is not None and 'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        commandstr = ' '.join(
            map(util.shellquote, journalstorage._currentcommand))
        if '\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition('\n')[0] + ' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not.

        (This is copied from the localrepo class)
        """
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        # the journal lock does not nest; holding it twice is a bug
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_('journal lock does not support nesting'))
        desc = _('journal of %s') % vfs.base
        try:
            # first try without waiting (timeout 0)
            l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, 'namejournal.lock',
                int(self.ui.config("ui", "timeout", "600")), desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            util.makedate(), self.user, self.command, namespace, name,
            oldhashes, newhashes)

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        with self.jlock(vfs):
            version = None
            # open file in amend mode to ensure it is created if missing
            with vfs('namejournal', mode='a+b', atomictemp=True) as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition('\0')[0]
                if version and version != str(storageversion):
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _("unsupported journal file version '%s'\n") % version)
                    return
                if not version:
                    # empty file, write version first
                    f.write(str(storageversion) + '\0')
                f.seek(0, os.SEEK_END)
                f.write(str(entry) + '\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        # stringmatcher returns (kind, pattern, matchfunc); keep the matcher
        if namespace is not None:
            namespace = util.stringmatcher(namespace)[-1]
        if name is not None:
            name = util.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename='namejournal', _newestfirst=True):
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        lines = raw.split('\0')
        version = lines and lines[0]
        if version != str(storageversion):
            version = version or _('not available')
            raise error.Abort(_("unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)
415
425
# journal reading
# log options that don't make sense for journal
_ignoreopts = ('no-merges', 'graph')
@command(
    'journal', [
        ('', 'all', None, 'show history for all names'),
        ('c', 'commits', None, 'show commit metadata'),
    ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
    '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy.  Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    # default to the working copy ('.'); None means "all names"
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = cmdutil.loglimit(opts)
    # entry stays None when the filtered iterator is empty; see the final
    # "no recorded locations" message below
    entry = None
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        # show the name column when listing all names or a regex was given
        # (note: with --all, name is None, so the or-short-circuit matters)
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    # hash may have been stripped; report instead of aborting
                    fm.write('repolookuperror', "%s\n\n", str(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now