##// END OF EJS Templates
journal: do not use atomictemp (issue5338)...
Jun Wu -
r33924:e6d42156 default
parent child Browse files
Show More
@@ -1,514 +1,514 b''
1 # journal.py
1 # journal.py
2 #
2 #
3 # Copyright 2014-2016 Facebook, Inc.
3 # Copyright 2014-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """track previous positions of bookmarks (EXPERIMENTAL)
7 """track previous positions of bookmarks (EXPERIMENTAL)
8
8
9 This extension adds a new command: `hg journal`, which shows you where
9 This extension adds a new command: `hg journal`, which shows you where
10 bookmarks were previously located.
10 bookmarks were previously located.
11
11
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import errno
17 import errno
18 import os
18 import os
19 import weakref
19 import weakref
20
20
21 from mercurial.i18n import _
21 from mercurial.i18n import _
22
22
23 from mercurial import (
23 from mercurial import (
24 bookmarks,
24 bookmarks,
25 cmdutil,
25 cmdutil,
26 dispatch,
26 dispatch,
27 error,
27 error,
28 extensions,
28 extensions,
29 hg,
29 hg,
30 localrepo,
30 localrepo,
31 lock,
31 lock,
32 node,
32 node,
33 registrar,
33 registrar,
34 util,
34 util,
35 )
35 )
36
36
37 from . import share
37 from . import share
38
38
# command table for this extension; populated by the @command decorator
cmdtable = {}
command = registrar.command(cmdtable)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# storage format version; increment when the format changes
storageversion = 0

# namespaces (the 'namespace' field of a journalentry)
bookmarktype = 'bookmark'
wdirparenttype = 'wdirparent'
# In a shared repository, what shared feature name is used
# to indicate this namespace is shared with the source?
sharednamespaces = {
    bookmarktype: hg.sharedbookmarks,
}
59
59
# Journal recording, register hooks and storage object
def extsetup(ui):
    """Wrap mercurial internals so journal entries get recorded.

    Hooks command dispatch (to capture the command line), bookmark
    writing, dirstate creation, and the share/unshare machinery.
    """
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
    extensions.wrapfilecache(
        localrepo.localrepository, 'dirstate', wrapdirstate)
    extensions.wrapfunction(hg, 'postshare', wrappostshare)
    extensions.wrapfunction(hg, 'copystore', unsharejournal)
68
68
def reposetup(ui, repo):
    """Attach journal storage to local repositories.

    The 'namejournal' file is added to the wlock-free prefixes because
    the journal uses its own dedicated lock (see journalstorage.jlock).
    """
    if repo.local():
        repo.journal = journalstorage(repo)
        repo._wlockfreeprefix.add('namejournal')

        dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
        if cached:
            # already instantiated dirstate isn't yet marked as
            # "journal"-ing, even though repo.dirstate() was already
            # wrapped by own wrapdirstate()
            _setupdirstate(repo, dirstate)
80
80
def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    # stash the full argument list on the storage class before dispatching
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)
85
85
def _setupdirstate(repo, dirstate):
    """Give a dirstate access to the journal and hook parent changes."""
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback('journal', recorddirstateparents)
89
89
# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    if not util.safehasattr(repo, 'journal'):
        # no journal on this repo (e.g. reposetup never attached one);
        # hand back the dirstate untouched
        return dirstate
    _setupdirstate(repo, dirstate)
    return dirstate
97
97
def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal."""
    old = list(old)
    new = list(new)
    if util.safehasattr(dirstate, 'journalstorage'):
        # only record two hashes if there was a merge; a nullid second
        # parent means there is only one real parent
        oldhashes = old[:1] if old[1] == node.nullid else old
        newhashes = new[:1] if new[1] == node.nullid else new
        # '.' names the working copy in the wdirparent namespace
        dirstate.journalstorage.record(
            wdirparenttype, '.', oldhashes, newhashes)
108
108
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        # presumably bmstore(repo) loads the previous on-disk bookmark
        # state, which we diff against the store about to be written
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.iteritems():
            oldvalue = oldmarks.get(mark, node.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    # always fall through to the real write
    return orig(store, fp)
120
120
# shared repository support
def _readsharedfeatures(repo):
    """A set of shared features for this repository"""
    try:
        data = repo.vfs.read('shared')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        # no 'shared' file at all: nothing is shared
        return set()
    return set(data.splitlines())
130
130
def _mergeentriesiter(*iterables, **kwargs):
    """Given a set of sorted iterables, yield the next entry in merged order

    Note that by default entries go from most recent to oldest.

    Keyword argument `order` (default `max`) selects which candidate is
    yielded next; pass `order=min` for oldest-first merging.
    """
    order = kwargs.pop('order', max)
    iterables = [iter(it) for it in iterables]
    # this tracks still active iterables; iterables are deleted as they are
    # exhausted, which is why this is a dictionary and why each entry also
    # stores the key. Entries are mutable so we can store the next value each
    # time.
    iterable_map = {}
    for key, it in enumerate(iterables):
        try:
            iterable_map[key] = [next(it), key, it]
        except StopIteration:
            # empty entry, can be ignored
            pass

    while iterable_map:
        # entries are [value, key, iterator] lists, so order() compares by
        # value first with key as a deterministic tie-breaker.
        # Use .values() instead of the py2-only .itervalues(): behavior is
        # identical here and the helper stays portable to Python 3.
        value, key, it = order(iterable_map.values())
        yield value
        try:
            iterable_map[key][0] = next(it)
        except StopIteration:
            # this iterable is empty, remove it from consideration
            del iterable_map[key]
158
158
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        # append to the shared-features file so _readsharedfeatures
        # picks up 'journal' in the destination
        with destrepo.vfs('shared', 'a') as fp:
            fp.write('journal\n')
165
165
def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    # NOTE(review): `sharedfeatures > {'journal'}` is a *strict* superset
    # test, i.e. it requires at least one feature besides 'journal' —
    # verify `>=` wasn't intended.
    if (repo.path == repopath and repo.shared() and
            util.safehasattr(repo, 'journal')):
        sharedrepo = share._getsrcrepo(repo)
        sharedfeatures = _readsharedfeatures(repo)
        if sharedrepo and sharedfeatures > {'journal'}:
            # there is a shared repository and there are shared journal entries
            # to copy. move shared date over from source to destination but
            # move the local file first
            if repo.vfs.exists('namejournal'):
                journalpath = repo.vfs.join('namejournal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            # read local entries from the renamed backup, oldest first
            local = storage._open(
                repo.vfs, filename='namejournal.bak', _newestfirst=False)
            # only copy shared entries whose namespace is actually shared
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            # interleave both streams oldest-first into a fresh local file
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)
189
189
class journalentry(collections.namedtuple(
        u'journalentry',
        u'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        """Parse one newline-separated storage record into an entry."""
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        # hex hashes are comma-separated within their field
        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
        newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
        return cls(
            (timestamp, tz), user, command, namespace, name,
            oldhashes, newhashes)

    def __str__(self):
        """String representation for storage"""
        # inverse of fromstorage: 7 newline-separated fields
        time = ' '.join(map(str, self.timestamp))
        oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
        newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))
229
229
class journalstorage(object):
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that added when the regular wlock is unlocked (e.g.
    the dirstate).

    """
    # class-level so the command line survives repo object churn
    _currentcommand = ()
    # weakref to the currently-held journal lock, if any
    _lockref = None

    def __init__(self, repo):
        self.user = util.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = share._getsrcrepo(repo)
            if sharedrepo is not None and 'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        commandstr = ' '.join(
            map(util.shellquote, journalstorage._currentcommand))
        if '\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition('\n')[0] + ' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not.

        (This is copied from the localrepo class)
        """
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        # nesting is refused outright rather than ref-counted
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_('journal lock does not support nesting'))
        desc = _('journal of %s') % vfs.base
        try:
            # first attempt: do not wait (timeout 0)
            l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, 'namejournal.lock',
                int(self.ui.config("ui", "timeout")), desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            util.makedate(), self.user, self.command, namespace, name,
            oldhashes, newhashes)

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        """Append one entry to the journal file under the journal lock.

        The file is opened in plain append mode, NOT with atomictemp:
        atomictemp would rewrite the whole file via rename, which does
        not mix with concurrent appenders (issue5338).
        """
        with self.jlock(vfs):
            version = None
            # open file in amend mode to ensure it is created if missing
            with vfs('namejournal', mode='a+b') as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition('\0')[0]
                if version and version != str(storageversion):
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _("unsupported journal file version '%s'\n") % version)
                    return
                if not version:
                    # empty file, write version first
                    f.write(str(storageversion) + '\0')
                f.seek(0, os.SEEK_END)
                f.write(str(entry) + '\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        # stringmatcher returns (kind, pattern, matcher); keep the matcher
        if namespace is not None:
            namespace = util.stringmatcher(namespace)[-1]
        if name is not None:
            name = util.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename='namejournal', _newestfirst=True):
        """Yield journalentry objects read from `filename` in `vfs`.

        Yields nothing when the file does not exist; aborts on an
        unrecognised storage version.
        """
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        # records are NUL-separated; the first field is the version
        lines = raw.split('\0')
        version = lines and lines[0]
        if version != str(storageversion):
            version = version or _('not available')
            raise error.Abort(_("unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                # trailing/empty separator, not a record
                continue
            yield journalentry.fromstorage(line)
426
426
427 # journal reading
427 # journal reading
428 # log options that don't make sense for journal
428 # log options that don't make sense for journal
429 _ignoreopts = ('no-merges', 'graph')
429 _ignoreopts = ('no-merges', 'graph')
430 @command(
430 @command(
431 'journal', [
431 'journal', [
432 ('', 'all', None, 'show history for all names'),
432 ('', 'all', None, 'show history for all names'),
433 ('c', 'commits', None, 'show commit metadata'),
433 ('c', 'commits', None, 'show commit metadata'),
434 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
434 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
435 '[OPTION]... [BOOKMARKNAME]')
435 '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy. Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    # '.' is the journal key for the working copy; None means "no name
    # filter", i.e. show entries for all bookmarks and the working copy.
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    # The header is informational chatter; suppress it for JSON output so
    # machine consumers get a clean document.
    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = cmdutil.loglimit(opts)
    # 'entry' doubles as a did-we-print-anything flag for the message below.
    entry = None
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        # Prior position only shown in verbose mode.
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        # The bookmark/'.' column only makes sense when more than one name
        # can appear: --all, or a regex name filter. When --all is set,
        # 'name' is None, so short-circuit evaluation keeps startswith safe.
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
            # 'newhash' (not 'hash') to avoid shadowing the builtin.
            for newhash in entry.newhashes:
                try:
                    ctx = repo[newhash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    # A journaled hash may have been stripped since it was
                    # recorded; report it rather than aborting the listing.
                    fm.write('repolookuperror', "%s\n\n", str(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now