##// END OF EJS Templates
typing: disable an attribute-error warning in the journal extension...
Matt Harbison -
r50755:d5116e4d default
parent child Browse files
Show More
@@ -1,603 +1,607 b''
1 # journal.py
1 # journal.py
2 #
2 #
3 # Copyright 2014-2016 Facebook, Inc.
3 # Copyright 2014-2016 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """track previous positions of bookmarks (EXPERIMENTAL)
7 """track previous positions of bookmarks (EXPERIMENTAL)
8
8
9 This extension adds a new command: `hg journal`, which shows you where
9 This extension adds a new command: `hg journal`, which shows you where
10 bookmarks were previously located.
10 bookmarks were previously located.
11
11
12 """
12 """
13
13
14
14
15 import collections
15 import collections
16 import os
16 import os
17 import weakref
17 import weakref
18
18
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20 from mercurial.node import (
20 from mercurial.node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 )
23 )
24
24
25 from mercurial import (
25 from mercurial import (
26 bookmarks,
26 bookmarks,
27 cmdutil,
27 cmdutil,
28 dispatch,
28 dispatch,
29 encoding,
29 encoding,
30 error,
30 error,
31 extensions,
31 extensions,
32 hg,
32 hg,
33 localrepo,
33 localrepo,
34 lock,
34 lock,
35 logcmdutil,
35 logcmdutil,
36 pycompat,
36 pycompat,
37 registrar,
37 registrar,
38 util,
38 util,
39 )
39 )
40 from mercurial.utils import (
40 from mercurial.utils import (
41 dateutil,
41 dateutil,
42 procutil,
42 procutil,
43 stringutil,
43 stringutil,
44 )
44 )
45
45
46 cmdtable = {}
46 cmdtable = {}
47 command = registrar.command(cmdtable)
47 command = registrar.command(cmdtable)
48
48
49 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
49 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
50 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
50 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
51 # be specifying the version(s) of Mercurial they are tested with, or
51 # be specifying the version(s) of Mercurial they are tested with, or
52 # leave the attribute unspecified.
52 # leave the attribute unspecified.
53 testedwith = b'ships-with-hg-core'
53 testedwith = b'ships-with-hg-core'
54
54
55 # storage format version; increment when the format changes
55 # storage format version; increment when the format changes
56 storageversion = 0
56 storageversion = 0
57
57
58 # namespaces
58 # namespaces
59 bookmarktype = b'bookmark'
59 bookmarktype = b'bookmark'
60 wdirparenttype = b'wdirparent'
60 wdirparenttype = b'wdirparent'
61 # In a shared repository, what shared feature name is used
61 # In a shared repository, what shared feature name is used
62 # to indicate this namespace is shared with the source?
62 # to indicate this namespace is shared with the source?
63 sharednamespaces = {
63 sharednamespaces = {
64 bookmarktype: hg.sharedbookmarks,
64 bookmarktype: hg.sharedbookmarks,
65 }
65 }
66
66
# Journal recording, register hooks and storage object
def extsetup(ui):
    """Wrap the dispatch/bookmark/dirstate/share machinery to record journal
    entries for every command run."""
    extensions.wrapfunction(dispatch, b'runcommand', runcommand)
    extensions.wrapfunction(bookmarks.bmstore, b'_write', recordbookmarks)
    extensions.wrapfilecache(
        localrepo.localrepository, b'dirstate', wrapdirstate
    )
    extensions.wrapfunction(hg, b'postshare', wrappostshare)
    extensions.wrapfunction(hg, b'copystore', unsharejournal)
76
76
77
77
def reposetup(ui, repo):
    """Attach a journalstorage to every local repository.

    Also marks an already-instantiated (cached) dirstate so its parent
    changes are recorded; newly created dirstates are handled by
    wrapdirstate() instead.
    """
    if repo.local():
        repo.journal = journalstorage(repo)
        repo._wlockfreeprefix.add(b'namejournal')

        dirstate, cached = localrepo.isfilecached(repo, b'dirstate')
        if cached:
            # already instantiated dirstate isn't yet marked as
            # "journal"-ing, even though repo.dirstate() was already
            # wrapped by own wrapdirstate()
            _setupdirstate(repo, dirstate)
89
89
90
90
def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)
95
95
96
96
def _setupdirstate(repo, dirstate):
    """Give the dirstate access to journal storage and register the
    parent-change callback that records working-copy moves."""
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback(b'journal', recorddirstateparents)
100
100
101
101
# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    # reposetup() may not have run on this repo (e.g. non-local repos)
    if util.safehasattr(repo, 'journal'):
        _setupdirstate(repo, dirstate)
    return dirstate
109
109
110
110
def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal."""
    old = list(old)
    new = list(new)
    if util.safehasattr(dirstate, 'journalstorage'):
        # only record two hashes if there was a merge
        oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old
        newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new
        dirstate.journalstorage.record(
            wdirparenttype, b'.', oldhashes, newhashes
        )
122
122
123
123
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        # compare the about-to-be-written store against the on-disk state
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.items():
            oldvalue = oldmarks.get(mark, repo.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)
135
135
136
136
137 # shared repository support
137 # shared repository support
138 def _readsharedfeatures(repo):
138 def _readsharedfeatures(repo):
139 """A set of shared features for this repository"""
139 """A set of shared features for this repository"""
140 try:
140 try:
141 return set(repo.vfs.read(b'shared').splitlines())
141 return set(repo.vfs.read(b'shared').splitlines())
142 except FileNotFoundError:
142 except FileNotFoundError:
143 return set()
143 return set()
144
144
145
145
146 def _mergeentriesiter(*iterables, **kwargs):
146 def _mergeentriesiter(*iterables, **kwargs):
147 """Given a set of sorted iterables, yield the next entry in merged order
147 """Given a set of sorted iterables, yield the next entry in merged order
148
148
149 Note that by default entries go from most recent to oldest.
149 Note that by default entries go from most recent to oldest.
150 """
150 """
151 order = kwargs.pop('order', max)
151 order = kwargs.pop('order', max)
152 iterables = [iter(it) for it in iterables]
152 iterables = [iter(it) for it in iterables]
153 # this tracks still active iterables; iterables are deleted as they are
153 # this tracks still active iterables; iterables are deleted as they are
154 # exhausted, which is why this is a dictionary and why each entry also
154 # exhausted, which is why this is a dictionary and why each entry also
155 # stores the key. Entries are mutable so we can store the next value each
155 # stores the key. Entries are mutable so we can store the next value each
156 # time.
156 # time.
157 iterable_map = {}
157 iterable_map = {}
158 for key, it in enumerate(iterables):
158 for key, it in enumerate(iterables):
159 try:
159 try:
160 iterable_map[key] = [next(it), key, it]
160 iterable_map[key] = [next(it), key, it]
161 except StopIteration:
161 except StopIteration:
162 # empty entry, can be ignored
162 # empty entry, can be ignored
163 pass
163 pass
164
164
165 while iterable_map:
165 while iterable_map:
166 value, key, it = order(iterable_map.values())
166 value, key, it = order(iterable_map.values())
167 yield value
167 yield value
168 try:
168 try:
169 iterable_map[key][0] = next(it)
169 iterable_map[key][0] = next(it)
170 except StopIteration:
170 except StopIteration:
171 # this iterable is empty, remove it from consideration
171 # this iterable is empty, remove it from consideration
172 del iterable_map[key]
172 del iterable_map[key]
173
173
174
174
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        # append so other shared features recorded earlier are preserved
        with destrepo.vfs(b'shared', b'a') as fp:
            fp.write(b'journal\n')
181
181
182
182
def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (
        repo.path == repopath
        and repo.shared()
        and util.safehasattr(repo, 'journal')
    ):
        sharedrepo = hg.sharedreposource(repo)
        sharedfeatures = _readsharedfeatures(repo)
        # proper superset: b'journal' plus at least one other shared feature
        if sharedrepo and sharedfeatures > {b'journal'}:
            # there is a shared repository and there are shared journal entries
            # to copy. move shared date over from source to destination but
            # move the local file first
            if repo.vfs.exists(b'namejournal'):
                journalpath = repo.vfs.join(b'namejournal')
                util.rename(journalpath, journalpath + b'.bak')
            storage = repo.journal
            local = storage._open(
                repo.vfs, filename=b'namejournal.bak', _newestfirst=False
            )
            shared = (
                e
                for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures
            )
            # oldest-first merge so _write appends entries in storage order
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)
212
212
213
213
class journalentry(
    collections.namedtuple(
        'journalentry',
        'timestamp user command namespace name oldhashes newhashes',
    )
):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """

    @classmethod
    def fromstorage(cls, line):
        """Parse a single serialized entry (see class docstring for the
        wire format) and return a journalentry instance."""
        (
            time,
            user,
            command,
            namespace,
            name,
            oldhashes,
            newhashes,
        ) = line.split(b'\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        oldhashes = tuple(bin(hash) for hash in oldhashes.split(b','))
        newhashes = tuple(bin(hash) for hash in newhashes.split(b','))
        return cls(
            (timestamp, tz),
            user,
            command,
            namespace,
            name,
            oldhashes,
            newhashes,
        )

    def __bytes__(self):
        """bytes representation for storage"""
        time = b' '.join(map(pycompat.bytestr, self.timestamp))
        oldhashes = b','.join([hex(hash) for hash in self.oldhashes])
        newhashes = b','.join([hex(hash) for hash in self.newhashes])
        return b'\n'.join(
            (
                time,
                self.user,
                self.command,
                self.namespace,
                self.name,
                oldhashes,
                newhashes,
            )
        )

    __str__ = encoding.strmethod(__bytes__)
280
280
281
281
class journalstorage:
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that added when the regular wlock is unlocked (e.g.
    the dirstate).

    """

    # command-line arguments of the currently running hg command; class-level
    # because recording may start before a local repo exists (see
    # recordcommand below)
    _currentcommand = ()
    # weakref to the currently held journal lock, if any
    _lockref = None

    def __init__(self, repo):
        self.user = procutil.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
            sharedrepo = hg.sharedreposource(repo)
            if sharedrepo is not None and b'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        commandstr = b' '.join(
            map(procutil.shellquote, journalstorage._currentcommand)
        )
        if b'\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition(b'\n')[0] + b' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not.

        (This is copied from the localrepo class)
        """
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_(b'journal lock does not support nesting'))
        desc = _(b'journal of %s') % vfs.base
        try:
            # first try without waiting (timeout 0)
            l = lock.lock(vfs, b'namejournal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _(b"waiting for lock on %s held by %r\n") % (desc, inst.locker)
            )
            # default to 600 seconds timeout
            l = lock.lock(
                vfs,
                b'namejournal.lock',
                self.ui.configint(b"ui", b"timeout"),
                desc=desc,
            )
            self.ui.warn(_(b"got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            dateutil.makedate(),
            self.user,
            self.command,
            namespace,
            name,
            oldhashes,
            newhashes,
        )

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        """Append *entry* to the journal file under *vfs*, creating it
        (with a version header) if necessary."""
        with self.jlock(vfs):
            # open file in amend mode to ensure it is created if missing
            with vfs(b'namejournal', mode=b'a+b') as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition(b'\0')[0]
                if version and version != b"%d" % storageversion:
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _(b"unsupported journal file version '%s'\n") % version
                    )
                    return
                if not version:
                    # empty file, write version first
                    f.write((b"%d" % storageversion) + b'\0')
                f.seek(0, os.SEEK_END)
                f.write(bytes(entry) + b'\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        if namespace is not None:
            namespace = stringutil.stringmatcher(namespace)[-1]
        if name is not None:
            name = stringutil.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e
            for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures
        )
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename=b'namejournal', _newestfirst=True):
        """Generator over the entries of one journal file, newest first
        unless _newestfirst is False; empty if the file does not exist."""
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        lines = raw.split(b'\0')
        version = lines and lines[0]
        if version != b"%d" % storageversion:
            version = version or _(b'not available')
            raise error.Abort(_(b"unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)
492
492
493
493
494 # journal reading
494 # journal reading
495 # log options that don't make sense for journal
495 # log options that don't make sense for journal
496 _ignoreopts = (b'no-merges', b'graph')
496 _ignoreopts = (b'no-merges', b'graph')
497
497
498
498
@command(
    b'journal',
    [
        (b'', b'all', None, b'show history for all names'),
        (b'c', b'commits', None, b'show commit metadata'),
    ]
    # re-use the standard log options, minus the ones that make no sense here
    + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
    b'[OPTION]... [BOOKMARKNAME]',
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy. Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    # command option tables use bytes keys; convert from the str **kwargs
    opts = pycompat.byteskwargs(opts)
    # default filter: only the working copy's history ('.')
    name = b'.'
    if opts.get(b'all'):
        if args:
            raise error.Abort(
                _(b"You can't combine --all and filtering on a name")
            )
        # None means "no name filter" for repo.journal.filtered() below
        name = None
    if args:
        name = args[0]

    fm = ui.formatter(b'journal', opts)

    def formatnodes(nodes):
        # render a list of binary nodes as comma-separated (hex) hashes
        return fm.formatlist(map(fm.hexfunc, nodes), name=b'node', sep=b',')

    if opts.get(b"template") != b"json":
        # human-oriented header; skipped for JSON so output stays parseable
        if name is None:
            displayname = _(b'the working copy and bookmarks')
        else:
            displayname = b"'%s'" % name
        ui.status(_(b"previous locations of %s:\n") % displayname)

    limit = logcmdutil.getlimit(opts)
    # remains None when the journal yields nothing; checked after the loop
    entry = None
    ui.pager(b'journal')
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break

        fm.startitem()
        # prior location(s) only shown in verbose mode
        fm.condwrite(
            ui.verbose, b'oldnodes', b'%s -> ', formatnodes(entry.oldhashes)
        )
        fm.write(b'newnodes', b'%s', formatnodes(entry.newhashes))
        fm.condwrite(ui.verbose, b'user', b' %-8s', entry.user)

        # ``name`` is bytes, or None only if 'all' was an option.
        fm.condwrite(
            # pytype: disable=attribute-error
            # safe: when name is None, opts.get(b'all') is truthy and the
            # 'or' short-circuits before name.startswith() is evaluated
            opts.get(b'all') or name.startswith(b're:'),
            # pytype: enable=attribute-error
            b'name',
            b' %-8s',
            entry.name,
        )

        fm.condwrite(
            ui.verbose,
            b'date',
            b' %s',
            fm.formatdate(entry.timestamp, b'%Y-%m-%d %H:%M %1%2'),
        )
        fm.write(b'command', b' %s\n', entry.command)

        if opts.get(b"commits"):
            # -c/--commits: append full log output for each new hash
            if fm.isplain():
                displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            else:
                displayer = logcmdutil.changesetformatter(
                    ui, repo, fm.nested(b'changesets'), diffopts=opts
                )
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    # the commit may have been stripped/obsoleted since it
                    # was journalled; report instead of aborting
                    fm.plain(b"%s\n\n" % pycompat.bytestr(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_(b"no recorded locations\n"))
General Comments 0
You need to be logged in to leave comments. Login now