##// END OF EJS Templates
py3: replace __str__ to __bytes__ in hgext/journal.py...
Pulkit Goyal -
r36684:d79d68bb default
parent child Browse files
Show More
@@ -1,517 +1,520 b''
1 # journal.py
1 # journal.py
2 #
2 #
3 # Copyright 2014-2016 Facebook, Inc.
3 # Copyright 2014-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """track previous positions of bookmarks (EXPERIMENTAL)
7 """track previous positions of bookmarks (EXPERIMENTAL)
8
8
9 This extension adds a new command: `hg journal`, which shows you where
9 This extension adds a new command: `hg journal`, which shows you where
10 bookmarks were previously located.
10 bookmarks were previously located.
11
11
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import errno
17 import errno
18 import os
18 import os
19 import weakref
19 import weakref
20
20
21 from mercurial.i18n import _
21 from mercurial.i18n import _
22
22
23 from mercurial import (
23 from mercurial import (
24 bookmarks,
24 bookmarks,
25 cmdutil,
25 cmdutil,
26 dispatch,
26 dispatch,
27 encoding,
27 error,
28 error,
28 extensions,
29 extensions,
29 hg,
30 hg,
30 localrepo,
31 localrepo,
31 lock,
32 lock,
32 logcmdutil,
33 logcmdutil,
33 node,
34 node,
34 pycompat,
35 pycompat,
35 registrar,
36 registrar,
36 util,
37 util,
37 )
38 )
38 from mercurial.utils import dateutil
39 from mercurial.utils import dateutil
39
40
40 cmdtable = {}
41 cmdtable = {}
41 command = registrar.command(cmdtable)
42 command = registrar.command(cmdtable)
42
43
43 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
44 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
44 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
45 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
45 # be specifying the version(s) of Mercurial they are tested with, or
46 # be specifying the version(s) of Mercurial they are tested with, or
46 # leave the attribute unspecified.
47 # leave the attribute unspecified.
47 testedwith = 'ships-with-hg-core'
48 testedwith = 'ships-with-hg-core'
48
49
49 # storage format version; increment when the format changes
50 # storage format version; increment when the format changes
50 storageversion = 0
51 storageversion = 0
51
52
52 # namespaces
53 # namespaces
53 bookmarktype = 'bookmark'
54 bookmarktype = 'bookmark'
54 wdirparenttype = 'wdirparent'
55 wdirparenttype = 'wdirparent'
55 # In a shared repository, what shared feature name is used
56 # In a shared repository, what shared feature name is used
56 # to indicate this namespace is shared with the source?
57 # to indicate this namespace is shared with the source?
57 sharednamespaces = {
58 sharednamespaces = {
58 bookmarktype: hg.sharedbookmarks,
59 bookmarktype: hg.sharedbookmarks,
59 }
60 }
60
61
61 # Journal recording, register hooks and storage object
62 # Journal recording, register hooks and storage object
62 def extsetup(ui):
63 def extsetup(ui):
63 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
64 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
64 extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
65 extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
65 extensions.wrapfilecache(
66 extensions.wrapfilecache(
66 localrepo.localrepository, 'dirstate', wrapdirstate)
67 localrepo.localrepository, 'dirstate', wrapdirstate)
67 extensions.wrapfunction(hg, 'postshare', wrappostshare)
68 extensions.wrapfunction(hg, 'postshare', wrappostshare)
68 extensions.wrapfunction(hg, 'copystore', unsharejournal)
69 extensions.wrapfunction(hg, 'copystore', unsharejournal)
69
70
70 def reposetup(ui, repo):
71 def reposetup(ui, repo):
71 if repo.local():
72 if repo.local():
72 repo.journal = journalstorage(repo)
73 repo.journal = journalstorage(repo)
73 repo._wlockfreeprefix.add('namejournal')
74 repo._wlockfreeprefix.add('namejournal')
74
75
75 dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
76 dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
76 if cached:
77 if cached:
77 # already instantiated dirstate isn't yet marked as
78 # already instantiated dirstate isn't yet marked as
78 # "journal"-ing, even though repo.dirstate() was already
79 # "journal"-ing, even though repo.dirstate() was already
79 # wrapped by own wrapdirstate()
80 # wrapped by own wrapdirstate()
80 _setupdirstate(repo, dirstate)
81 _setupdirstate(repo, dirstate)
81
82
82 def runcommand(orig, lui, repo, cmd, fullargs, *args):
83 def runcommand(orig, lui, repo, cmd, fullargs, *args):
83 """Track the command line options for recording in the journal"""
84 """Track the command line options for recording in the journal"""
84 journalstorage.recordcommand(*fullargs)
85 journalstorage.recordcommand(*fullargs)
85 return orig(lui, repo, cmd, fullargs, *args)
86 return orig(lui, repo, cmd, fullargs, *args)
86
87
87 def _setupdirstate(repo, dirstate):
88 def _setupdirstate(repo, dirstate):
88 dirstate.journalstorage = repo.journal
89 dirstate.journalstorage = repo.journal
89 dirstate.addparentchangecallback('journal', recorddirstateparents)
90 dirstate.addparentchangecallback('journal', recorddirstateparents)
90
91
91 # hooks to record dirstate changes
92 # hooks to record dirstate changes
92 def wrapdirstate(orig, repo):
93 def wrapdirstate(orig, repo):
93 """Make journal storage available to the dirstate object"""
94 """Make journal storage available to the dirstate object"""
94 dirstate = orig(repo)
95 dirstate = orig(repo)
95 if util.safehasattr(repo, 'journal'):
96 if util.safehasattr(repo, 'journal'):
96 _setupdirstate(repo, dirstate)
97 _setupdirstate(repo, dirstate)
97 return dirstate
98 return dirstate
98
99
99 def recorddirstateparents(dirstate, old, new):
100 def recorddirstateparents(dirstate, old, new):
100 """Records all dirstate parent changes in the journal."""
101 """Records all dirstate parent changes in the journal."""
101 old = list(old)
102 old = list(old)
102 new = list(new)
103 new = list(new)
103 if util.safehasattr(dirstate, 'journalstorage'):
104 if util.safehasattr(dirstate, 'journalstorage'):
104 # only record two hashes if there was a merge
105 # only record two hashes if there was a merge
105 oldhashes = old[:1] if old[1] == node.nullid else old
106 oldhashes = old[:1] if old[1] == node.nullid else old
106 newhashes = new[:1] if new[1] == node.nullid else new
107 newhashes = new[:1] if new[1] == node.nullid else new
107 dirstate.journalstorage.record(
108 dirstate.journalstorage.record(
108 wdirparenttype, '.', oldhashes, newhashes)
109 wdirparenttype, '.', oldhashes, newhashes)
109
110
110 # hooks to record bookmark changes (both local and remote)
111 # hooks to record bookmark changes (both local and remote)
111 def recordbookmarks(orig, store, fp):
112 def recordbookmarks(orig, store, fp):
112 """Records all bookmark changes in the journal."""
113 """Records all bookmark changes in the journal."""
113 repo = store._repo
114 repo = store._repo
114 if util.safehasattr(repo, 'journal'):
115 if util.safehasattr(repo, 'journal'):
115 oldmarks = bookmarks.bmstore(repo)
116 oldmarks = bookmarks.bmstore(repo)
116 for mark, value in store.iteritems():
117 for mark, value in store.iteritems():
117 oldvalue = oldmarks.get(mark, node.nullid)
118 oldvalue = oldmarks.get(mark, node.nullid)
118 if value != oldvalue:
119 if value != oldvalue:
119 repo.journal.record(bookmarktype, mark, oldvalue, value)
120 repo.journal.record(bookmarktype, mark, oldvalue, value)
120 return orig(store, fp)
121 return orig(store, fp)
121
122
122 # shared repository support
123 # shared repository support
123 def _readsharedfeatures(repo):
124 def _readsharedfeatures(repo):
124 """A set of shared features for this repository"""
125 """A set of shared features for this repository"""
125 try:
126 try:
126 return set(repo.vfs.read('shared').splitlines())
127 return set(repo.vfs.read('shared').splitlines())
127 except IOError as inst:
128 except IOError as inst:
128 if inst.errno != errno.ENOENT:
129 if inst.errno != errno.ENOENT:
129 raise
130 raise
130 return set()
131 return set()
131
132
132 def _mergeentriesiter(*iterables, **kwargs):
133 def _mergeentriesiter(*iterables, **kwargs):
133 """Given a set of sorted iterables, yield the next entry in merged order
134 """Given a set of sorted iterables, yield the next entry in merged order
134
135
135 Note that by default entries go from most recent to oldest.
136 Note that by default entries go from most recent to oldest.
136 """
137 """
137 order = kwargs.pop(r'order', max)
138 order = kwargs.pop(r'order', max)
138 iterables = [iter(it) for it in iterables]
139 iterables = [iter(it) for it in iterables]
139 # this tracks still active iterables; iterables are deleted as they are
140 # this tracks still active iterables; iterables are deleted as they are
140 # exhausted, which is why this is a dictionary and why each entry also
141 # exhausted, which is why this is a dictionary and why each entry also
141 # stores the key. Entries are mutable so we can store the next value each
142 # stores the key. Entries are mutable so we can store the next value each
142 # time.
143 # time.
143 iterable_map = {}
144 iterable_map = {}
144 for key, it in enumerate(iterables):
145 for key, it in enumerate(iterables):
145 try:
146 try:
146 iterable_map[key] = [next(it), key, it]
147 iterable_map[key] = [next(it), key, it]
147 except StopIteration:
148 except StopIteration:
148 # empty entry, can be ignored
149 # empty entry, can be ignored
149 pass
150 pass
150
151
151 while iterable_map:
152 while iterable_map:
152 value, key, it = order(iterable_map.itervalues())
153 value, key, it = order(iterable_map.itervalues())
153 yield value
154 yield value
154 try:
155 try:
155 iterable_map[key][0] = next(it)
156 iterable_map[key][0] = next(it)
156 except StopIteration:
157 except StopIteration:
157 # this iterable is empty, remove it from consideration
158 # this iterable is empty, remove it from consideration
158 del iterable_map[key]
159 del iterable_map[key]
159
160
160 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
161 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
161 """Mark this shared working copy as sharing journal information"""
162 """Mark this shared working copy as sharing journal information"""
162 with destrepo.wlock():
163 with destrepo.wlock():
163 orig(sourcerepo, destrepo, **kwargs)
164 orig(sourcerepo, destrepo, **kwargs)
164 with destrepo.vfs('shared', 'a') as fp:
165 with destrepo.vfs('shared', 'a') as fp:
165 fp.write('journal\n')
166 fp.write('journal\n')
166
167
167 def unsharejournal(orig, ui, repo, repopath):
168 def unsharejournal(orig, ui, repo, repopath):
168 """Copy shared journal entries into this repo when unsharing"""
169 """Copy shared journal entries into this repo when unsharing"""
169 if (repo.path == repopath and repo.shared() and
170 if (repo.path == repopath and repo.shared() and
170 util.safehasattr(repo, 'journal')):
171 util.safehasattr(repo, 'journal')):
171 sharedrepo = hg.sharedreposource(repo)
172 sharedrepo = hg.sharedreposource(repo)
172 sharedfeatures = _readsharedfeatures(repo)
173 sharedfeatures = _readsharedfeatures(repo)
173 if sharedrepo and sharedfeatures > {'journal'}:
174 if sharedrepo and sharedfeatures > {'journal'}:
174 # there is a shared repository and there are shared journal entries
175 # there is a shared repository and there are shared journal entries
175 # to copy. move shared date over from source to destination but
176 # to copy. move shared date over from source to destination but
176 # move the local file first
177 # move the local file first
177 if repo.vfs.exists('namejournal'):
178 if repo.vfs.exists('namejournal'):
178 journalpath = repo.vfs.join('namejournal')
179 journalpath = repo.vfs.join('namejournal')
179 util.rename(journalpath, journalpath + '.bak')
180 util.rename(journalpath, journalpath + '.bak')
180 storage = repo.journal
181 storage = repo.journal
181 local = storage._open(
182 local = storage._open(
182 repo.vfs, filename='namejournal.bak', _newestfirst=False)
183 repo.vfs, filename='namejournal.bak', _newestfirst=False)
183 shared = (
184 shared = (
184 e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
185 e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
185 if sharednamespaces.get(e.namespace) in sharedfeatures)
186 if sharednamespaces.get(e.namespace) in sharedfeatures)
186 for entry in _mergeentriesiter(local, shared, order=min):
187 for entry in _mergeentriesiter(local, shared, order=min):
187 storage._write(repo.vfs, entry)
188 storage._write(repo.vfs, entry)
188
189
189 return orig(ui, repo, repopath)
190 return orig(ui, repo, repopath)
190
191
191 class journalentry(collections.namedtuple(
192 class journalentry(collections.namedtuple(
192 u'journalentry',
193 u'journalentry',
193 u'timestamp user command namespace name oldhashes newhashes')):
194 u'timestamp user command namespace name oldhashes newhashes')):
194 """Individual journal entry
195 """Individual journal entry
195
196
196 * timestamp: a mercurial (time, timezone) tuple
197 * timestamp: a mercurial (time, timezone) tuple
197 * user: the username that ran the command
198 * user: the username that ran the command
198 * namespace: the entry namespace, an opaque string
199 * namespace: the entry namespace, an opaque string
199 * name: the name of the changed item, opaque string with meaning in the
200 * name: the name of the changed item, opaque string with meaning in the
200 namespace
201 namespace
201 * command: the hg command that triggered this record
202 * command: the hg command that triggered this record
202 * oldhashes: a tuple of one or more binary hashes for the old location
203 * oldhashes: a tuple of one or more binary hashes for the old location
203 * newhashes: a tuple of one or more binary hashes for the new location
204 * newhashes: a tuple of one or more binary hashes for the new location
204
205
205 Handles serialisation from and to the storage format. Fields are
206 Handles serialisation from and to the storage format. Fields are
206 separated by newlines, hashes are written out in hex separated by commas,
207 separated by newlines, hashes are written out in hex separated by commas,
207 timestamp and timezone are separated by a space.
208 timestamp and timezone are separated by a space.
208
209
209 """
210 """
210 @classmethod
211 @classmethod
211 def fromstorage(cls, line):
212 def fromstorage(cls, line):
212 (time, user, command, namespace, name,
213 (time, user, command, namespace, name,
213 oldhashes, newhashes) = line.split('\n')
214 oldhashes, newhashes) = line.split('\n')
214 timestamp, tz = time.split()
215 timestamp, tz = time.split()
215 timestamp, tz = float(timestamp), int(tz)
216 timestamp, tz = float(timestamp), int(tz)
216 oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
217 oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
217 newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
218 newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
218 return cls(
219 return cls(
219 (timestamp, tz), user, command, namespace, name,
220 (timestamp, tz), user, command, namespace, name,
220 oldhashes, newhashes)
221 oldhashes, newhashes)
221
222
222 def __str__(self):
223 def __bytes__(self):
223 """String representation for storage"""
224 """bytes representation for storage"""
224 time = ' '.join(map(str, self.timestamp))
225 time = ' '.join(map(str, self.timestamp))
225 oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
226 oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
226 newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
227 newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
227 return '\n'.join((
228 return '\n'.join((
228 time, self.user, self.command, self.namespace, self.name,
229 time, self.user, self.command, self.namespace, self.name,
229 oldhashes, newhashes))
230 oldhashes, newhashes))
230
231
232 __str__ = encoding.strmethod(__bytes__)
233
231 class journalstorage(object):
234 class journalstorage(object):
232 """Storage for journal entries
235 """Storage for journal entries
233
236
234 Entries are divided over two files; one with entries that pertain to the
237 Entries are divided over two files; one with entries that pertain to the
235 local working copy *only*, and one with entries that are shared across
238 local working copy *only*, and one with entries that are shared across
236 multiple working copies when shared using the share extension.
239 multiple working copies when shared using the share extension.
237
240
238 Entries are stored with NUL bytes as separators. See the journalentry
241 Entries are stored with NUL bytes as separators. See the journalentry
239 class for the per-entry structure.
242 class for the per-entry structure.
240
243
241 The file format starts with an integer version, delimited by a NUL.
244 The file format starts with an integer version, delimited by a NUL.
242
245
243 This storage uses a dedicated lock; this makes it easier to avoid issues
246 This storage uses a dedicated lock; this makes it easier to avoid issues
244 with adding entries that added when the regular wlock is unlocked (e.g.
247 with adding entries that added when the regular wlock is unlocked (e.g.
245 the dirstate).
248 the dirstate).
246
249
247 """
250 """
248 _currentcommand = ()
251 _currentcommand = ()
249 _lockref = None
252 _lockref = None
250
253
251 def __init__(self, repo):
254 def __init__(self, repo):
252 self.user = util.getuser()
255 self.user = util.getuser()
253 self.ui = repo.ui
256 self.ui = repo.ui
254 self.vfs = repo.vfs
257 self.vfs = repo.vfs
255
258
256 # is this working copy using a shared storage?
259 # is this working copy using a shared storage?
257 self.sharedfeatures = self.sharedvfs = None
260 self.sharedfeatures = self.sharedvfs = None
258 if repo.shared():
261 if repo.shared():
259 features = _readsharedfeatures(repo)
262 features = _readsharedfeatures(repo)
260 sharedrepo = hg.sharedreposource(repo)
263 sharedrepo = hg.sharedreposource(repo)
261 if sharedrepo is not None and 'journal' in features:
264 if sharedrepo is not None and 'journal' in features:
262 self.sharedvfs = sharedrepo.vfs
265 self.sharedvfs = sharedrepo.vfs
263 self.sharedfeatures = features
266 self.sharedfeatures = features
264
267
265 # track the current command for recording in journal entries
268 # track the current command for recording in journal entries
266 @property
269 @property
267 def command(self):
270 def command(self):
268 commandstr = ' '.join(
271 commandstr = ' '.join(
269 map(util.shellquote, journalstorage._currentcommand))
272 map(util.shellquote, journalstorage._currentcommand))
270 if '\n' in commandstr:
273 if '\n' in commandstr:
271 # truncate multi-line commands
274 # truncate multi-line commands
272 commandstr = commandstr.partition('\n')[0] + ' ...'
275 commandstr = commandstr.partition('\n')[0] + ' ...'
273 return commandstr
276 return commandstr
274
277
275 @classmethod
278 @classmethod
276 def recordcommand(cls, *fullargs):
279 def recordcommand(cls, *fullargs):
277 """Set the current hg arguments, stored with recorded entries"""
280 """Set the current hg arguments, stored with recorded entries"""
278 # Set the current command on the class because we may have started
281 # Set the current command on the class because we may have started
279 # with a non-local repo (cloning for example).
282 # with a non-local repo (cloning for example).
280 cls._currentcommand = fullargs
283 cls._currentcommand = fullargs
281
284
282 def _currentlock(self, lockref):
285 def _currentlock(self, lockref):
283 """Returns the lock if it's held, or None if it's not.
286 """Returns the lock if it's held, or None if it's not.
284
287
285 (This is copied from the localrepo class)
288 (This is copied from the localrepo class)
286 """
289 """
287 if lockref is None:
290 if lockref is None:
288 return None
291 return None
289 l = lockref()
292 l = lockref()
290 if l is None or not l.held:
293 if l is None or not l.held:
291 return None
294 return None
292 return l
295 return l
293
296
294 def jlock(self, vfs):
297 def jlock(self, vfs):
295 """Create a lock for the journal file"""
298 """Create a lock for the journal file"""
296 if self._currentlock(self._lockref) is not None:
299 if self._currentlock(self._lockref) is not None:
297 raise error.Abort(_('journal lock does not support nesting'))
300 raise error.Abort(_('journal lock does not support nesting'))
298 desc = _('journal of %s') % vfs.base
301 desc = _('journal of %s') % vfs.base
299 try:
302 try:
300 l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
303 l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
301 except error.LockHeld as inst:
304 except error.LockHeld as inst:
302 self.ui.warn(
305 self.ui.warn(
303 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
306 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
304 # default to 600 seconds timeout
307 # default to 600 seconds timeout
305 l = lock.lock(
308 l = lock.lock(
306 vfs, 'namejournal.lock',
309 vfs, 'namejournal.lock',
307 self.ui.configint("ui", "timeout"), desc=desc)
310 self.ui.configint("ui", "timeout"), desc=desc)
308 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
311 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
309 self._lockref = weakref.ref(l)
312 self._lockref = weakref.ref(l)
310 return l
313 return l
311
314
312 def record(self, namespace, name, oldhashes, newhashes):
315 def record(self, namespace, name, oldhashes, newhashes):
313 """Record a new journal entry
316 """Record a new journal entry
314
317
315 * namespace: an opaque string; this can be used to filter on the type
318 * namespace: an opaque string; this can be used to filter on the type
316 of recorded entries.
319 of recorded entries.
317 * name: the name defining this entry; for bookmarks, this is the
320 * name: the name defining this entry; for bookmarks, this is the
318 bookmark name. Can be filtered on when retrieving entries.
321 bookmark name. Can be filtered on when retrieving entries.
319 * oldhashes and newhashes: each a single binary hash, or a list of
322 * oldhashes and newhashes: each a single binary hash, or a list of
320 binary hashes. These represent the old and new position of the named
323 binary hashes. These represent the old and new position of the named
321 item.
324 item.
322
325
323 """
326 """
324 if not isinstance(oldhashes, list):
327 if not isinstance(oldhashes, list):
325 oldhashes = [oldhashes]
328 oldhashes = [oldhashes]
326 if not isinstance(newhashes, list):
329 if not isinstance(newhashes, list):
327 newhashes = [newhashes]
330 newhashes = [newhashes]
328
331
329 entry = journalentry(
332 entry = journalentry(
330 dateutil.makedate(), self.user, self.command, namespace, name,
333 dateutil.makedate(), self.user, self.command, namespace, name,
331 oldhashes, newhashes)
334 oldhashes, newhashes)
332
335
333 vfs = self.vfs
336 vfs = self.vfs
334 if self.sharedvfs is not None:
337 if self.sharedvfs is not None:
335 # write to the shared repository if this feature is being
338 # write to the shared repository if this feature is being
336 # shared between working copies.
339 # shared between working copies.
337 if sharednamespaces.get(namespace) in self.sharedfeatures:
340 if sharednamespaces.get(namespace) in self.sharedfeatures:
338 vfs = self.sharedvfs
341 vfs = self.sharedvfs
339
342
340 self._write(vfs, entry)
343 self._write(vfs, entry)
341
344
342 def _write(self, vfs, entry):
345 def _write(self, vfs, entry):
343 with self.jlock(vfs):
346 with self.jlock(vfs):
344 version = None
347 version = None
345 # open file in amend mode to ensure it is created if missing
348 # open file in amend mode to ensure it is created if missing
346 with vfs('namejournal', mode='a+b') as f:
349 with vfs('namejournal', mode='a+b') as f:
347 f.seek(0, os.SEEK_SET)
350 f.seek(0, os.SEEK_SET)
348 # Read just enough bytes to get a version number (up to 2
351 # Read just enough bytes to get a version number (up to 2
349 # digits plus separator)
352 # digits plus separator)
350 version = f.read(3).partition('\0')[0]
353 version = f.read(3).partition('\0')[0]
351 if version and version != str(storageversion):
354 if version and version != str(storageversion):
352 # different version of the storage. Exit early (and not
355 # different version of the storage. Exit early (and not
353 # write anything) if this is not a version we can handle or
356 # write anything) if this is not a version we can handle or
354 # the file is corrupt. In future, perhaps rotate the file
357 # the file is corrupt. In future, perhaps rotate the file
355 # instead?
358 # instead?
356 self.ui.warn(
359 self.ui.warn(
357 _("unsupported journal file version '%s'\n") % version)
360 _("unsupported journal file version '%s'\n") % version)
358 return
361 return
359 if not version:
362 if not version:
360 # empty file, write version first
363 # empty file, write version first
361 f.write(str(storageversion) + '\0')
364 f.write(str(storageversion) + '\0')
362 f.seek(0, os.SEEK_END)
365 f.seek(0, os.SEEK_END)
363 f.write(str(entry) + '\0')
366 f.write(str(entry) + '\0')
364
367
365 def filtered(self, namespace=None, name=None):
368 def filtered(self, namespace=None, name=None):
366 """Yield all journal entries with the given namespace or name
369 """Yield all journal entries with the given namespace or name
367
370
368 Both the namespace and the name are optional; if neither is given all
371 Both the namespace and the name are optional; if neither is given all
369 entries in the journal are produced.
372 entries in the journal are produced.
370
373
371 Matching supports regular expressions by using the `re:` prefix
374 Matching supports regular expressions by using the `re:` prefix
372 (use `literal:` to match names or namespaces that start with `re:`)
375 (use `literal:` to match names or namespaces that start with `re:`)
373
376
374 """
377 """
375 if namespace is not None:
378 if namespace is not None:
376 namespace = util.stringmatcher(namespace)[-1]
379 namespace = util.stringmatcher(namespace)[-1]
377 if name is not None:
380 if name is not None:
378 name = util.stringmatcher(name)[-1]
381 name = util.stringmatcher(name)[-1]
379 for entry in self:
382 for entry in self:
380 if namespace is not None and not namespace(entry.namespace):
383 if namespace is not None and not namespace(entry.namespace):
381 continue
384 continue
382 if name is not None and not name(entry.name):
385 if name is not None and not name(entry.name):
383 continue
386 continue
384 yield entry
387 yield entry
385
388
386 def __iter__(self):
389 def __iter__(self):
387 """Iterate over the storage
390 """Iterate over the storage
388
391
389 Yields journalentry instances for each contained journal record.
392 Yields journalentry instances for each contained journal record.
390
393
391 """
394 """
392 local = self._open(self.vfs)
395 local = self._open(self.vfs)
393
396
394 if self.sharedvfs is None:
397 if self.sharedvfs is None:
395 return local
398 return local
396
399
397 # iterate over both local and shared entries, but only those
400 # iterate over both local and shared entries, but only those
398 # shared entries that are among the currently shared features
401 # shared entries that are among the currently shared features
399 shared = (
402 shared = (
400 e for e in self._open(self.sharedvfs)
403 e for e in self._open(self.sharedvfs)
401 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
404 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
402 return _mergeentriesiter(local, shared)
405 return _mergeentriesiter(local, shared)
403
406
404 def _open(self, vfs, filename='namejournal', _newestfirst=True):
407 def _open(self, vfs, filename='namejournal', _newestfirst=True):
405 if not vfs.exists(filename):
408 if not vfs.exists(filename):
406 return
409 return
407
410
408 with vfs(filename) as f:
411 with vfs(filename) as f:
409 raw = f.read()
412 raw = f.read()
410
413
411 lines = raw.split('\0')
414 lines = raw.split('\0')
412 version = lines and lines[0]
415 version = lines and lines[0]
413 if version != str(storageversion):
416 if version != str(storageversion):
414 version = version or _('not available')
417 version = version or _('not available')
415 raise error.Abort(_("unknown journal file version '%s'") % version)
418 raise error.Abort(_("unknown journal file version '%s'") % version)
416
419
417 # Skip the first line, it's a version number. Normally we iterate over
420 # Skip the first line, it's a version number. Normally we iterate over
418 # these in reverse order to list newest first; only when copying across
421 # these in reverse order to list newest first; only when copying across
419 # a shared storage do we forgo reversing.
422 # a shared storage do we forgo reversing.
420 lines = lines[1:]
423 lines = lines[1:]
421 if _newestfirst:
424 if _newestfirst:
422 lines = reversed(lines)
425 lines = reversed(lines)
423 for line in lines:
426 for line in lines:
424 if not line:
427 if not line:
425 continue
428 continue
426 yield journalentry.fromstorage(line)
429 yield journalentry.fromstorage(line)
427
430
428 # journal reading
431 # journal reading
429 # log options that don't make sense for journal
432 # log options that don't make sense for journal
430 _ignoreopts = ('no-merges', 'graph')
433 _ignoreopts = ('no-merges', 'graph')
431 @command(
434 @command(
432 'journal', [
435 'journal', [
433 ('', 'all', None, 'show history for all names'),
436 ('', 'all', None, 'show history for all names'),
434 ('c', 'commits', None, 'show commit metadata'),
437 ('c', 'commits', None, 'show commit metadata'),
435 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
438 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
436 '[OPTION]... [BOOKMARKNAME]')
439 '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy. Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    # On Python 3 **opts arrives with str keys; normalize to bytes, which is
    # what the rest of Mercurial expects.
    opts = pycompat.byteskwargs(opts)
    # name selects which journal entries to show: '.' is the working copy,
    # None means "no filter" (used by --all).
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    # The human-readable header is suppressed for machine-readable output.
    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = logcmdutil.getlimit(opts)
    # entry doubles as a sentinel: it stays None iff the journal yielded
    # nothing, which triggers the "no recorded locations" message below.
    entry = None
    ui.pager('journal')
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        # Show the entry name when multiple names can appear in the output:
        # either --all was given, or the name filter is a regex (note the
        # short-circuit: with --all, name is None and startswith is skipped).
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            # 'hashvalue' rather than 'hash' to avoid shadowing the builtin.
            for hashvalue in entry.newhashes:
                try:
                    ctx = repo[hashvalue]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    # The hash may refer to a commit that was since stripped;
                    # report it inline instead of aborting the listing.
                    fm.write('repolookuperror', "%s\n\n", pycompat.bytestr(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now