##// END OF EJS Templates
py3: handle keyword arguments in hgext/journal.py...
Pulkit Goyal -
r35001:135edf12 default
parent child Browse files
Show More
@@ -1,514 +1,516 b''
1 # journal.py
1 # journal.py
2 #
2 #
3 # Copyright 2014-2016 Facebook, Inc.
3 # Copyright 2014-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """track previous positions of bookmarks (EXPERIMENTAL)
7 """track previous positions of bookmarks (EXPERIMENTAL)
8
8
9 This extension adds a new command: `hg journal`, which shows you where
9 This extension adds a new command: `hg journal`, which shows you where
10 bookmarks were previously located.
10 bookmarks were previously located.
11
11
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import errno
17 import errno
18 import os
18 import os
19 import weakref
19 import weakref
20
20
21 from mercurial.i18n import _
21 from mercurial.i18n import _
22
22
23 from mercurial import (
23 from mercurial import (
24 bookmarks,
24 bookmarks,
25 cmdutil,
25 cmdutil,
26 dispatch,
26 dispatch,
27 error,
27 error,
28 extensions,
28 extensions,
29 hg,
29 hg,
30 localrepo,
30 localrepo,
31 lock,
31 lock,
32 node,
32 node,
33 pycompat,
33 registrar,
34 registrar,
34 util,
35 util,
35 )
36 )
36
37
37 from . import share
38 from . import share
38
39
39 cmdtable = {}
40 cmdtable = {}
40 command = registrar.command(cmdtable)
41 command = registrar.command(cmdtable)
41
42
42 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
43 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
44 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
44 # be specifying the version(s) of Mercurial they are tested with, or
45 # be specifying the version(s) of Mercurial they are tested with, or
45 # leave the attribute unspecified.
46 # leave the attribute unspecified.
46 testedwith = 'ships-with-hg-core'
47 testedwith = 'ships-with-hg-core'
47
48
48 # storage format version; increment when the format changes
49 # storage format version; increment when the format changes
49 storageversion = 0
50 storageversion = 0
50
51
51 # namespaces
52 # namespaces
52 bookmarktype = 'bookmark'
53 bookmarktype = 'bookmark'
53 wdirparenttype = 'wdirparent'
54 wdirparenttype = 'wdirparent'
54 # In a shared repository, what shared feature name is used
55 # In a shared repository, what shared feature name is used
55 # to indicate this namespace is shared with the source?
56 # to indicate this namespace is shared with the source?
56 sharednamespaces = {
57 sharednamespaces = {
57 bookmarktype: hg.sharedbookmarks,
58 bookmarktype: hg.sharedbookmarks,
58 }
59 }
59
60
60 # Journal recording, register hooks and storage object
61 # Journal recording, register hooks and storage object
61 def extsetup(ui):
62 def extsetup(ui):
62 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
63 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
63 extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
64 extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
64 extensions.wrapfilecache(
65 extensions.wrapfilecache(
65 localrepo.localrepository, 'dirstate', wrapdirstate)
66 localrepo.localrepository, 'dirstate', wrapdirstate)
66 extensions.wrapfunction(hg, 'postshare', wrappostshare)
67 extensions.wrapfunction(hg, 'postshare', wrappostshare)
67 extensions.wrapfunction(hg, 'copystore', unsharejournal)
68 extensions.wrapfunction(hg, 'copystore', unsharejournal)
68
69
69 def reposetup(ui, repo):
70 def reposetup(ui, repo):
70 if repo.local():
71 if repo.local():
71 repo.journal = journalstorage(repo)
72 repo.journal = journalstorage(repo)
72 repo._wlockfreeprefix.add('namejournal')
73 repo._wlockfreeprefix.add('namejournal')
73
74
74 dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
75 dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
75 if cached:
76 if cached:
76 # already instantiated dirstate isn't yet marked as
77 # already instantiated dirstate isn't yet marked as
77 # "journal"-ing, even though repo.dirstate() was already
78 # "journal"-ing, even though repo.dirstate() was already
78 # wrapped by own wrapdirstate()
79 # wrapped by own wrapdirstate()
79 _setupdirstate(repo, dirstate)
80 _setupdirstate(repo, dirstate)
80
81
81 def runcommand(orig, lui, repo, cmd, fullargs, *args):
82 def runcommand(orig, lui, repo, cmd, fullargs, *args):
82 """Track the command line options for recording in the journal"""
83 """Track the command line options for recording in the journal"""
83 journalstorage.recordcommand(*fullargs)
84 journalstorage.recordcommand(*fullargs)
84 return orig(lui, repo, cmd, fullargs, *args)
85 return orig(lui, repo, cmd, fullargs, *args)
85
86
86 def _setupdirstate(repo, dirstate):
87 def _setupdirstate(repo, dirstate):
87 dirstate.journalstorage = repo.journal
88 dirstate.journalstorage = repo.journal
88 dirstate.addparentchangecallback('journal', recorddirstateparents)
89 dirstate.addparentchangecallback('journal', recorddirstateparents)
89
90
90 # hooks to record dirstate changes
91 # hooks to record dirstate changes
91 def wrapdirstate(orig, repo):
92 def wrapdirstate(orig, repo):
92 """Make journal storage available to the dirstate object"""
93 """Make journal storage available to the dirstate object"""
93 dirstate = orig(repo)
94 dirstate = orig(repo)
94 if util.safehasattr(repo, 'journal'):
95 if util.safehasattr(repo, 'journal'):
95 _setupdirstate(repo, dirstate)
96 _setupdirstate(repo, dirstate)
96 return dirstate
97 return dirstate
97
98
98 def recorddirstateparents(dirstate, old, new):
99 def recorddirstateparents(dirstate, old, new):
99 """Records all dirstate parent changes in the journal."""
100 """Records all dirstate parent changes in the journal."""
100 old = list(old)
101 old = list(old)
101 new = list(new)
102 new = list(new)
102 if util.safehasattr(dirstate, 'journalstorage'):
103 if util.safehasattr(dirstate, 'journalstorage'):
103 # only record two hashes if there was a merge
104 # only record two hashes if there was a merge
104 oldhashes = old[:1] if old[1] == node.nullid else old
105 oldhashes = old[:1] if old[1] == node.nullid else old
105 newhashes = new[:1] if new[1] == node.nullid else new
106 newhashes = new[:1] if new[1] == node.nullid else new
106 dirstate.journalstorage.record(
107 dirstate.journalstorage.record(
107 wdirparenttype, '.', oldhashes, newhashes)
108 wdirparenttype, '.', oldhashes, newhashes)
108
109
109 # hooks to record bookmark changes (both local and remote)
110 # hooks to record bookmark changes (both local and remote)
110 def recordbookmarks(orig, store, fp):
111 def recordbookmarks(orig, store, fp):
111 """Records all bookmark changes in the journal."""
112 """Records all bookmark changes in the journal."""
112 repo = store._repo
113 repo = store._repo
113 if util.safehasattr(repo, 'journal'):
114 if util.safehasattr(repo, 'journal'):
114 oldmarks = bookmarks.bmstore(repo)
115 oldmarks = bookmarks.bmstore(repo)
115 for mark, value in store.iteritems():
116 for mark, value in store.iteritems():
116 oldvalue = oldmarks.get(mark, node.nullid)
117 oldvalue = oldmarks.get(mark, node.nullid)
117 if value != oldvalue:
118 if value != oldvalue:
118 repo.journal.record(bookmarktype, mark, oldvalue, value)
119 repo.journal.record(bookmarktype, mark, oldvalue, value)
119 return orig(store, fp)
120 return orig(store, fp)
120
121
121 # shared repository support
122 # shared repository support
122 def _readsharedfeatures(repo):
123 def _readsharedfeatures(repo):
123 """A set of shared features for this repository"""
124 """A set of shared features for this repository"""
124 try:
125 try:
125 return set(repo.vfs.read('shared').splitlines())
126 return set(repo.vfs.read('shared').splitlines())
126 except IOError as inst:
127 except IOError as inst:
127 if inst.errno != errno.ENOENT:
128 if inst.errno != errno.ENOENT:
128 raise
129 raise
129 return set()
130 return set()
130
131
131 def _mergeentriesiter(*iterables, **kwargs):
132 def _mergeentriesiter(*iterables, **kwargs):
132 """Given a set of sorted iterables, yield the next entry in merged order
133 """Given a set of sorted iterables, yield the next entry in merged order
133
134
134 Note that by default entries go from most recent to oldest.
135 Note that by default entries go from most recent to oldest.
135 """
136 """
136 order = kwargs.pop('order', max)
137 order = kwargs.pop(r'order', max)
137 iterables = [iter(it) for it in iterables]
138 iterables = [iter(it) for it in iterables]
138 # this tracks still active iterables; iterables are deleted as they are
139 # this tracks still active iterables; iterables are deleted as they are
139 # exhausted, which is why this is a dictionary and why each entry also
140 # exhausted, which is why this is a dictionary and why each entry also
140 # stores the key. Entries are mutable so we can store the next value each
141 # stores the key. Entries are mutable so we can store the next value each
141 # time.
142 # time.
142 iterable_map = {}
143 iterable_map = {}
143 for key, it in enumerate(iterables):
144 for key, it in enumerate(iterables):
144 try:
145 try:
145 iterable_map[key] = [next(it), key, it]
146 iterable_map[key] = [next(it), key, it]
146 except StopIteration:
147 except StopIteration:
147 # empty entry, can be ignored
148 # empty entry, can be ignored
148 pass
149 pass
149
150
150 while iterable_map:
151 while iterable_map:
151 value, key, it = order(iterable_map.itervalues())
152 value, key, it = order(iterable_map.itervalues())
152 yield value
153 yield value
153 try:
154 try:
154 iterable_map[key][0] = next(it)
155 iterable_map[key][0] = next(it)
155 except StopIteration:
156 except StopIteration:
156 # this iterable is empty, remove it from consideration
157 # this iterable is empty, remove it from consideration
157 del iterable_map[key]
158 del iterable_map[key]
158
159
159 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
160 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
160 """Mark this shared working copy as sharing journal information"""
161 """Mark this shared working copy as sharing journal information"""
161 with destrepo.wlock():
162 with destrepo.wlock():
162 orig(sourcerepo, destrepo, **kwargs)
163 orig(sourcerepo, destrepo, **kwargs)
163 with destrepo.vfs('shared', 'a') as fp:
164 with destrepo.vfs('shared', 'a') as fp:
164 fp.write('journal\n')
165 fp.write('journal\n')
165
166
166 def unsharejournal(orig, ui, repo, repopath):
167 def unsharejournal(orig, ui, repo, repopath):
167 """Copy shared journal entries into this repo when unsharing"""
168 """Copy shared journal entries into this repo when unsharing"""
168 if (repo.path == repopath and repo.shared() and
169 if (repo.path == repopath and repo.shared() and
169 util.safehasattr(repo, 'journal')):
170 util.safehasattr(repo, 'journal')):
170 sharedrepo = share._getsrcrepo(repo)
171 sharedrepo = share._getsrcrepo(repo)
171 sharedfeatures = _readsharedfeatures(repo)
172 sharedfeatures = _readsharedfeatures(repo)
172 if sharedrepo and sharedfeatures > {'journal'}:
173 if sharedrepo and sharedfeatures > {'journal'}:
173 # there is a shared repository and there are shared journal entries
174 # there is a shared repository and there are shared journal entries
174 # to copy. move shared date over from source to destination but
175 # to copy. move shared date over from source to destination but
175 # move the local file first
176 # move the local file first
176 if repo.vfs.exists('namejournal'):
177 if repo.vfs.exists('namejournal'):
177 journalpath = repo.vfs.join('namejournal')
178 journalpath = repo.vfs.join('namejournal')
178 util.rename(journalpath, journalpath + '.bak')
179 util.rename(journalpath, journalpath + '.bak')
179 storage = repo.journal
180 storage = repo.journal
180 local = storage._open(
181 local = storage._open(
181 repo.vfs, filename='namejournal.bak', _newestfirst=False)
182 repo.vfs, filename='namejournal.bak', _newestfirst=False)
182 shared = (
183 shared = (
183 e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
184 e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
184 if sharednamespaces.get(e.namespace) in sharedfeatures)
185 if sharednamespaces.get(e.namespace) in sharedfeatures)
185 for entry in _mergeentriesiter(local, shared, order=min):
186 for entry in _mergeentriesiter(local, shared, order=min):
186 storage._write(repo.vfs, entry)
187 storage._write(repo.vfs, entry)
187
188
188 return orig(ui, repo, repopath)
189 return orig(ui, repo, repopath)
189
190
190 class journalentry(collections.namedtuple(
191 class journalentry(collections.namedtuple(
191 u'journalentry',
192 u'journalentry',
192 u'timestamp user command namespace name oldhashes newhashes')):
193 u'timestamp user command namespace name oldhashes newhashes')):
193 """Individual journal entry
194 """Individual journal entry
194
195
195 * timestamp: a mercurial (time, timezone) tuple
196 * timestamp: a mercurial (time, timezone) tuple
196 * user: the username that ran the command
197 * user: the username that ran the command
197 * namespace: the entry namespace, an opaque string
198 * namespace: the entry namespace, an opaque string
198 * name: the name of the changed item, opaque string with meaning in the
199 * name: the name of the changed item, opaque string with meaning in the
199 namespace
200 namespace
200 * command: the hg command that triggered this record
201 * command: the hg command that triggered this record
201 * oldhashes: a tuple of one or more binary hashes for the old location
202 * oldhashes: a tuple of one or more binary hashes for the old location
202 * newhashes: a tuple of one or more binary hashes for the new location
203 * newhashes: a tuple of one or more binary hashes for the new location
203
204
204 Handles serialisation from and to the storage format. Fields are
205 Handles serialisation from and to the storage format. Fields are
205 separated by newlines, hashes are written out in hex separated by commas,
206 separated by newlines, hashes are written out in hex separated by commas,
206 timestamp and timezone are separated by a space.
207 timestamp and timezone are separated by a space.
207
208
208 """
209 """
209 @classmethod
210 @classmethod
210 def fromstorage(cls, line):
211 def fromstorage(cls, line):
211 (time, user, command, namespace, name,
212 (time, user, command, namespace, name,
212 oldhashes, newhashes) = line.split('\n')
213 oldhashes, newhashes) = line.split('\n')
213 timestamp, tz = time.split()
214 timestamp, tz = time.split()
214 timestamp, tz = float(timestamp), int(tz)
215 timestamp, tz = float(timestamp), int(tz)
215 oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
216 oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
216 newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
217 newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
217 return cls(
218 return cls(
218 (timestamp, tz), user, command, namespace, name,
219 (timestamp, tz), user, command, namespace, name,
219 oldhashes, newhashes)
220 oldhashes, newhashes)
220
221
221 def __str__(self):
222 def __str__(self):
222 """String representation for storage"""
223 """String representation for storage"""
223 time = ' '.join(map(str, self.timestamp))
224 time = ' '.join(map(str, self.timestamp))
224 oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
225 oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
225 newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
226 newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
226 return '\n'.join((
227 return '\n'.join((
227 time, self.user, self.command, self.namespace, self.name,
228 time, self.user, self.command, self.namespace, self.name,
228 oldhashes, newhashes))
229 oldhashes, newhashes))
229
230
230 class journalstorage(object):
231 class journalstorage(object):
231 """Storage for journal entries
232 """Storage for journal entries
232
233
233 Entries are divided over two files; one with entries that pertain to the
234 Entries are divided over two files; one with entries that pertain to the
234 local working copy *only*, and one with entries that are shared across
235 local working copy *only*, and one with entries that are shared across
235 multiple working copies when shared using the share extension.
236 multiple working copies when shared using the share extension.
236
237
237 Entries are stored with NUL bytes as separators. See the journalentry
238 Entries are stored with NUL bytes as separators. See the journalentry
238 class for the per-entry structure.
239 class for the per-entry structure.
239
240
240 The file format starts with an integer version, delimited by a NUL.
241 The file format starts with an integer version, delimited by a NUL.
241
242
242 This storage uses a dedicated lock; this makes it easier to avoid issues
243 This storage uses a dedicated lock; this makes it easier to avoid issues
243 with adding entries that added when the regular wlock is unlocked (e.g.
244 with adding entries that added when the regular wlock is unlocked (e.g.
244 the dirstate).
245 the dirstate).
245
246
246 """
247 """
247 _currentcommand = ()
248 _currentcommand = ()
248 _lockref = None
249 _lockref = None
249
250
250 def __init__(self, repo):
251 def __init__(self, repo):
251 self.user = util.getuser()
252 self.user = util.getuser()
252 self.ui = repo.ui
253 self.ui = repo.ui
253 self.vfs = repo.vfs
254 self.vfs = repo.vfs
254
255
255 # is this working copy using a shared storage?
256 # is this working copy using a shared storage?
256 self.sharedfeatures = self.sharedvfs = None
257 self.sharedfeatures = self.sharedvfs = None
257 if repo.shared():
258 if repo.shared():
258 features = _readsharedfeatures(repo)
259 features = _readsharedfeatures(repo)
259 sharedrepo = share._getsrcrepo(repo)
260 sharedrepo = share._getsrcrepo(repo)
260 if sharedrepo is not None and 'journal' in features:
261 if sharedrepo is not None and 'journal' in features:
261 self.sharedvfs = sharedrepo.vfs
262 self.sharedvfs = sharedrepo.vfs
262 self.sharedfeatures = features
263 self.sharedfeatures = features
263
264
264 # track the current command for recording in journal entries
265 # track the current command for recording in journal entries
265 @property
266 @property
266 def command(self):
267 def command(self):
267 commandstr = ' '.join(
268 commandstr = ' '.join(
268 map(util.shellquote, journalstorage._currentcommand))
269 map(util.shellquote, journalstorage._currentcommand))
269 if '\n' in commandstr:
270 if '\n' in commandstr:
270 # truncate multi-line commands
271 # truncate multi-line commands
271 commandstr = commandstr.partition('\n')[0] + ' ...'
272 commandstr = commandstr.partition('\n')[0] + ' ...'
272 return commandstr
273 return commandstr
273
274
274 @classmethod
275 @classmethod
275 def recordcommand(cls, *fullargs):
276 def recordcommand(cls, *fullargs):
276 """Set the current hg arguments, stored with recorded entries"""
277 """Set the current hg arguments, stored with recorded entries"""
277 # Set the current command on the class because we may have started
278 # Set the current command on the class because we may have started
278 # with a non-local repo (cloning for example).
279 # with a non-local repo (cloning for example).
279 cls._currentcommand = fullargs
280 cls._currentcommand = fullargs
280
281
281 def _currentlock(self, lockref):
282 def _currentlock(self, lockref):
282 """Returns the lock if it's held, or None if it's not.
283 """Returns the lock if it's held, or None if it's not.
283
284
284 (This is copied from the localrepo class)
285 (This is copied from the localrepo class)
285 """
286 """
286 if lockref is None:
287 if lockref is None:
287 return None
288 return None
288 l = lockref()
289 l = lockref()
289 if l is None or not l.held:
290 if l is None or not l.held:
290 return None
291 return None
291 return l
292 return l
292
293
293 def jlock(self, vfs):
294 def jlock(self, vfs):
294 """Create a lock for the journal file"""
295 """Create a lock for the journal file"""
295 if self._currentlock(self._lockref) is not None:
296 if self._currentlock(self._lockref) is not None:
296 raise error.Abort(_('journal lock does not support nesting'))
297 raise error.Abort(_('journal lock does not support nesting'))
297 desc = _('journal of %s') % vfs.base
298 desc = _('journal of %s') % vfs.base
298 try:
299 try:
299 l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
300 l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
300 except error.LockHeld as inst:
301 except error.LockHeld as inst:
301 self.ui.warn(
302 self.ui.warn(
302 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
303 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
303 # default to 600 seconds timeout
304 # default to 600 seconds timeout
304 l = lock.lock(
305 l = lock.lock(
305 vfs, 'namejournal.lock',
306 vfs, 'namejournal.lock',
306 int(self.ui.config("ui", "timeout")), desc=desc)
307 int(self.ui.config("ui", "timeout")), desc=desc)
307 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
308 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
308 self._lockref = weakref.ref(l)
309 self._lockref = weakref.ref(l)
309 return l
310 return l
310
311
311 def record(self, namespace, name, oldhashes, newhashes):
312 def record(self, namespace, name, oldhashes, newhashes):
312 """Record a new journal entry
313 """Record a new journal entry
313
314
314 * namespace: an opaque string; this can be used to filter on the type
315 * namespace: an opaque string; this can be used to filter on the type
315 of recorded entries.
316 of recorded entries.
316 * name: the name defining this entry; for bookmarks, this is the
317 * name: the name defining this entry; for bookmarks, this is the
317 bookmark name. Can be filtered on when retrieving entries.
318 bookmark name. Can be filtered on when retrieving entries.
318 * oldhashes and newhashes: each a single binary hash, or a list of
319 * oldhashes and newhashes: each a single binary hash, or a list of
319 binary hashes. These represent the old and new position of the named
320 binary hashes. These represent the old and new position of the named
320 item.
321 item.
321
322
322 """
323 """
323 if not isinstance(oldhashes, list):
324 if not isinstance(oldhashes, list):
324 oldhashes = [oldhashes]
325 oldhashes = [oldhashes]
325 if not isinstance(newhashes, list):
326 if not isinstance(newhashes, list):
326 newhashes = [newhashes]
327 newhashes = [newhashes]
327
328
328 entry = journalentry(
329 entry = journalentry(
329 util.makedate(), self.user, self.command, namespace, name,
330 util.makedate(), self.user, self.command, namespace, name,
330 oldhashes, newhashes)
331 oldhashes, newhashes)
331
332
332 vfs = self.vfs
333 vfs = self.vfs
333 if self.sharedvfs is not None:
334 if self.sharedvfs is not None:
334 # write to the shared repository if this feature is being
335 # write to the shared repository if this feature is being
335 # shared between working copies.
336 # shared between working copies.
336 if sharednamespaces.get(namespace) in self.sharedfeatures:
337 if sharednamespaces.get(namespace) in self.sharedfeatures:
337 vfs = self.sharedvfs
338 vfs = self.sharedvfs
338
339
339 self._write(vfs, entry)
340 self._write(vfs, entry)
340
341
341 def _write(self, vfs, entry):
342 def _write(self, vfs, entry):
342 with self.jlock(vfs):
343 with self.jlock(vfs):
343 version = None
344 version = None
344 # open file in amend mode to ensure it is created if missing
345 # open file in amend mode to ensure it is created if missing
345 with vfs('namejournal', mode='a+b') as f:
346 with vfs('namejournal', mode='a+b') as f:
346 f.seek(0, os.SEEK_SET)
347 f.seek(0, os.SEEK_SET)
347 # Read just enough bytes to get a version number (up to 2
348 # Read just enough bytes to get a version number (up to 2
348 # digits plus separator)
349 # digits plus separator)
349 version = f.read(3).partition('\0')[0]
350 version = f.read(3).partition('\0')[0]
350 if version and version != str(storageversion):
351 if version and version != str(storageversion):
351 # different version of the storage. Exit early (and not
352 # different version of the storage. Exit early (and not
352 # write anything) if this is not a version we can handle or
353 # write anything) if this is not a version we can handle or
353 # the file is corrupt. In future, perhaps rotate the file
354 # the file is corrupt. In future, perhaps rotate the file
354 # instead?
355 # instead?
355 self.ui.warn(
356 self.ui.warn(
356 _("unsupported journal file version '%s'\n") % version)
357 _("unsupported journal file version '%s'\n") % version)
357 return
358 return
358 if not version:
359 if not version:
359 # empty file, write version first
360 # empty file, write version first
360 f.write(str(storageversion) + '\0')
361 f.write(str(storageversion) + '\0')
361 f.seek(0, os.SEEK_END)
362 f.seek(0, os.SEEK_END)
362 f.write(str(entry) + '\0')
363 f.write(str(entry) + '\0')
363
364
364 def filtered(self, namespace=None, name=None):
365 def filtered(self, namespace=None, name=None):
365 """Yield all journal entries with the given namespace or name
366 """Yield all journal entries with the given namespace or name
366
367
367 Both the namespace and the name are optional; if neither is given all
368 Both the namespace and the name are optional; if neither is given all
368 entries in the journal are produced.
369 entries in the journal are produced.
369
370
370 Matching supports regular expressions by using the `re:` prefix
371 Matching supports regular expressions by using the `re:` prefix
371 (use `literal:` to match names or namespaces that start with `re:`)
372 (use `literal:` to match names or namespaces that start with `re:`)
372
373
373 """
374 """
374 if namespace is not None:
375 if namespace is not None:
375 namespace = util.stringmatcher(namespace)[-1]
376 namespace = util.stringmatcher(namespace)[-1]
376 if name is not None:
377 if name is not None:
377 name = util.stringmatcher(name)[-1]
378 name = util.stringmatcher(name)[-1]
378 for entry in self:
379 for entry in self:
379 if namespace is not None and not namespace(entry.namespace):
380 if namespace is not None and not namespace(entry.namespace):
380 continue
381 continue
381 if name is not None and not name(entry.name):
382 if name is not None and not name(entry.name):
382 continue
383 continue
383 yield entry
384 yield entry
384
385
385 def __iter__(self):
386 def __iter__(self):
386 """Iterate over the storage
387 """Iterate over the storage
387
388
388 Yields journalentry instances for each contained journal record.
389 Yields journalentry instances for each contained journal record.
389
390
390 """
391 """
391 local = self._open(self.vfs)
392 local = self._open(self.vfs)
392
393
393 if self.sharedvfs is None:
394 if self.sharedvfs is None:
394 return local
395 return local
395
396
396 # iterate over both local and shared entries, but only those
397 # iterate over both local and shared entries, but only those
397 # shared entries that are among the currently shared features
398 # shared entries that are among the currently shared features
398 shared = (
399 shared = (
399 e for e in self._open(self.sharedvfs)
400 e for e in self._open(self.sharedvfs)
400 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
401 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
401 return _mergeentriesiter(local, shared)
402 return _mergeentriesiter(local, shared)
402
403
403 def _open(self, vfs, filename='namejournal', _newestfirst=True):
404 def _open(self, vfs, filename='namejournal', _newestfirst=True):
404 if not vfs.exists(filename):
405 if not vfs.exists(filename):
405 return
406 return
406
407
407 with vfs(filename) as f:
408 with vfs(filename) as f:
408 raw = f.read()
409 raw = f.read()
409
410
410 lines = raw.split('\0')
411 lines = raw.split('\0')
411 version = lines and lines[0]
412 version = lines and lines[0]
412 if version != str(storageversion):
413 if version != str(storageversion):
413 version = version or _('not available')
414 version = version or _('not available')
414 raise error.Abort(_("unknown journal file version '%s'") % version)
415 raise error.Abort(_("unknown journal file version '%s'") % version)
415
416
416 # Skip the first line, it's a version number. Normally we iterate over
417 # Skip the first line, it's a version number. Normally we iterate over
417 # these in reverse order to list newest first; only when copying across
418 # these in reverse order to list newest first; only when copying across
418 # a shared storage do we forgo reversing.
419 # a shared storage do we forgo reversing.
419 lines = lines[1:]
420 lines = lines[1:]
420 if _newestfirst:
421 if _newestfirst:
421 lines = reversed(lines)
422 lines = reversed(lines)
422 for line in lines:
423 for line in lines:
423 if not line:
424 if not line:
424 continue
425 continue
425 yield journalentry.fromstorage(line)
426 yield journalentry.fromstorage(line)
426
427
427 # journal reading
428 # journal reading
428 # log options that don't make sense for journal
429 # log options that don't make sense for journal
429 _ignoreopts = ('no-merges', 'graph')
430 _ignoreopts = ('no-merges', 'graph')
430 @command(
431 @command(
431 'journal', [
432 'journal', [
432 ('', 'all', None, 'show history for all names'),
433 ('', 'all', None, 'show history for all names'),
433 ('c', 'commits', None, 'show commit metadata'),
434 ('c', 'commits', None, 'show commit metadata'),
434 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
435 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
435 '[OPTION]... [BOOKMARKNAME]')
436 '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy.  Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    # Convert py3 str keyword arguments back to bytes for internal use.
    opts = pycompat.byteskwargs(opts)
    # name is '.' (working copy) by default, None when --all is given,
    # or the bookmark name/pattern supplied as the first argument.
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = cmdutil.loglimit(opts)
    # entry stays None when the journal produced no rows; used below to
    # decide whether to print the "no recorded locations" notice.
    entry = None
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        # With --all, name is None; the short-circuit on opts.get('all')
        # guarantees name.startswith is only reached when name is set.
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
            # hashval (not "hash": avoid shadowing the builtin) is each new
            # location recorded by this journal entry.
            for hashval in entry.newhashes:
                try:
                    ctx = repo[hashval]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    # The commit may have been stripped; report and continue.
                    fm.write('repolookuperror', "%s\n\n", str(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now