@@ -0,0 +1,153 @@
+Journal extension test: tests the share extension support
+
+  $ cat >> testmocks.py << EOF
+  > # mock out util.getuser() and util.makedate() to supply testable values
+  > import os
+  > from mercurial import util
+  > def mockgetuser():
+  >     return 'foobar'
+  >
+  > def mockmakedate():
+  >     filename = os.path.join(os.environ['TESTTMP'], 'testtime')
+  >     try:
+  >         with open(filename, 'rb') as timef:
+  >             time = float(timef.read()) + 1
+  >     except IOError:
+  >         time = 0.0
+  >     with open(filename, 'wb') as timef:
+  >         timef.write(str(time))
+  >     return (time, 0)
+  >
+  > util.getuser = mockgetuser
+  > util.makedate = mockmakedate
+  > EOF
+
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > journal=
+  > share=
+  > testmocks=`pwd`/testmocks.py
+  > [remotenames]
+  > rename.default=remote
+  > EOF
+
+  $ hg init repo
+  $ cd repo
+  $ hg bookmark bm
+  $ touch file0
+  $ hg commit -Am 'file0 added'
+  adding file0
+  $ hg journal --all
+  previous locations of the working copy and bookmarks:
+  5640b525682e .        commit -Am 'file0 added'
+  5640b525682e bm       commit -Am 'file0 added'
+
+A shared working copy initially receives the same bookmarks and working copy
+
+  $ cd ..
+  $ hg share repo shared1
+  updating working directory
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd shared1
+  $ hg journal --all
+  previous locations of the working copy and bookmarks:
+  5640b525682e .        share repo shared1
+
+unless you explicitly share bookmarks
+
+  $ cd ..
+  $ hg share --bookmarks repo shared2
+  updating working directory
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ cd shared2
+  $ hg journal --all
+  previous locations of the working copy and bookmarks:
+  5640b525682e .        share --bookmarks repo shared2
+  5640b525682e bm       commit -Am 'file0 added'
+
+Moving the bookmark in the original repository is only shown in the repository
+that shares bookmarks
+
+  $ cd ../repo
+  $ touch file1
+  $ hg commit -Am "file1 added"
+  adding file1
+  $ cd ../shared1
+  $ hg journal --all
+  previous locations of the working copy and bookmarks:
+  5640b525682e .        share repo shared1
+  $ cd ../shared2
+  $ hg journal --all
+  previous locations of the working copy and bookmarks:
+  6432d239ac5d bm       commit -Am 'file1 added'
+  5640b525682e .        share --bookmarks repo shared2
+  5640b525682e bm       commit -Am 'file0 added'
+
+But working copy changes are always 'local'
+
+  $ cd ../repo
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  (leaving bookmark bm)
+  $ hg journal --all
+  previous locations of the working copy and bookmarks:
+  5640b525682e .        up 0
+  6432d239ac5d .        commit -Am 'file1 added'
+  6432d239ac5d bm       commit -Am 'file1 added'
+  5640b525682e .        commit -Am 'file0 added'
+  5640b525682e bm       commit -Am 'file0 added'
+  $ cd ../shared2
+  $ hg journal --all
+  previous locations of the working copy and bookmarks:
+  6432d239ac5d bm       commit -Am 'file1 added'
+  5640b525682e .        share --bookmarks repo shared2
+  5640b525682e bm       commit -Am 'file0 added'
+  $ hg up tip
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ hg up 0
+  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg journal
+  previous locations of '.':
+  5640b525682e up 0
+  6432d239ac5d up tip
+  5640b525682e share --bookmarks repo shared2
+
+Unsharing works as expected; the journal remains consistent
+
+  $ cd ../shared1
+  $ hg unshare
+  $ hg journal --all
+  previous locations of the working copy and bookmarks:
+  5640b525682e .        share repo shared1
+  $ cd ../shared2
+  $ hg unshare
+  $ hg journal --all
+  previous locations of the working copy and bookmarks:
+  5640b525682e .        up 0
+  6432d239ac5d .        up tip
+  6432d239ac5d bm       commit -Am 'file1 added'
+  5640b525682e .        share --bookmarks repo shared2
+  5640b525682e bm       commit -Am 'file0 added'
+
+New journal entries in the source repo no longer show up in the other working copies
+
+  $ cd ../repo
+  $ hg bookmark newbm -r tip
+  $ hg journal newbm
+  previous locations of 'newbm':
+  6432d239ac5d bookmark newbm -r tip
+  $ cd ../shared2
+  $ hg journal newbm
+  previous locations of 'newbm':
+  no recorded locations
+
+This applies for both directions
+
+  $ hg bookmark shared2bm -r tip
+  $ hg journal shared2bm
+  previous locations of 'shared2bm':
+  6432d239ac5d bookmark shared2bm -r tip
+  $ cd ../repo
+  $ hg journal shared2bm
+  previous locations of 'shared2bm':
+  no recorded locations
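The test above exercises one decision: which repository's journal a given entry lands in. As a reading aid, here is a minimal standalone sketch of that routing rule, assuming only that `.hg/shared` lists the share extension's feature names one per line; the function and argument names are made up for illustration, and the authoritative logic is the `sharednamespaces`/`journalstorage.record` code in the journal.py hunk below.

import os

def sharedfeatures(hgdir):
    """Read the share extension's feature list from <repo>/.hg/shared."""
    try:
        with open(os.path.join(hgdir, 'shared')) as fp:
            return set(fp.read().splitlines())
    except IOError:
        # no 'shared' file: plain repository, or a share without extra features
        return set()

def journaltarget(hgdir, sourcehgdir, namespace):
    """Pick which .hg directory's journal file should receive an entry."""
    features = sharedfeatures(hgdir)
    if 'journal' not in features:
        # journal entries are only shared once 'hg share' ran with the
        # journal extension enabled, which appends 'journal' to .hg/shared
        return hgdir
    # bookmark moves follow the bookmarks themselves: they are shared only
    # with 'hg share --bookmarks'; working-copy ('wdirparent') moves stay local
    if namespace == 'bookmark' and 'bookmarks' in features:
        return sourcehgdir
    return hgdir

In the test, shared1 lacks the 'bookmarks' feature, so its journal only shows working-copy entries, while shared2 (created with --bookmarks and the journal extension loaded) also sees bookmark history recorded in the source repository.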
@@ -1,374 +1,493 @@
 # journal.py
 #
 # Copyright 2014-2016 Facebook, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 """Track previous positions of bookmarks (EXPERIMENTAL)

 This extension adds a new command: `hg journal`, which shows you where
 bookmarks were previously located.

 """

 from __future__ import absolute_import

 import collections
+import errno
 import os
 import weakref

 from mercurial.i18n import _

 from mercurial import (
     bookmarks,
     cmdutil,
     commands,
     dirstate,
     dispatch,
     error,
     extensions,
+    hg,
     localrepo,
     lock,
     node,
     util,
 )

+from . import share
+
 cmdtable = {}
 command = cmdutil.command(cmdtable)

 # Note for extension authors: ONLY specify testedwith = 'internal' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'internal'

 # storage format version; increment when the format changes
 storageversion = 0

 # namespaces
 bookmarktype = 'bookmark'
 wdirparenttype = 'wdirparent'
+# In a shared repository, what shared feature name is used
+# to indicate this namespace is shared with the source?
+sharednamespaces = {
+    bookmarktype: hg.sharedbookmarks,
+}

 # Journal recording, register hooks and storage object
 def extsetup(ui):
     extensions.wrapfunction(dispatch, 'runcommand', runcommand)
     extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
     extensions.wrapfunction(
         dirstate.dirstate, '_writedirstate', recorddirstateparents)
     extensions.wrapfunction(
         localrepo.localrepository.dirstate, 'func', wrapdirstate)
+    extensions.wrapfunction(hg, 'postshare', wrappostshare)
+    extensions.wrapfunction(hg, 'copystore', unsharejournal)

 def reposetup(ui, repo):
     if repo.local():
         repo.journal = journalstorage(repo)

 def runcommand(orig, lui, repo, cmd, fullargs, *args):
     """Track the command line options for recording in the journal"""
     journalstorage.recordcommand(*fullargs)
     return orig(lui, repo, cmd, fullargs, *args)

 # hooks to record dirstate changes
 def wrapdirstate(orig, repo):
     """Make journal storage available to the dirstate object"""
     dirstate = orig(repo)
     if util.safehasattr(repo, 'journal'):
         dirstate.journalstorage = repo.journal
     return dirstate

 def recorddirstateparents(orig, dirstate, dirstatefp):
     """Records all dirstate parent changes in the journal."""
     if util.safehasattr(dirstate, 'journalstorage'):
         old = [node.nullid, node.nullid]
         nodesize = len(node.nullid)
         try:
             # The only source for the old state is in the dirstate file still
             # on disk; the in-memory dirstate object only contains the new
             # state. dirstate._opendirstatefile() switches beteen .hg/dirstate
             # and .hg/dirstate.pending depending on the transaction state.
             with dirstate._opendirstatefile() as fp:
                 state = fp.read(2 * nodesize)
             if len(state) == 2 * nodesize:
                 old = [state[:nodesize], state[nodesize:]]
         except IOError:
             pass

         new = dirstate.parents()
         if old != new:
             # only record two hashes if there was a merge
             oldhashes = old[:1] if old[1] == node.nullid else old
             newhashes = new[:1] if new[1] == node.nullid else new
             dirstate.journalstorage.record(
                 wdirparenttype, '.', oldhashes, newhashes)

     return orig(dirstate, dirstatefp)

 # hooks to record bookmark changes (both local and remote)
 def recordbookmarks(orig, store, fp):
     """Records all bookmark changes in the journal."""
     repo = store._repo
     if util.safehasattr(repo, 'journal'):
         oldmarks = bookmarks.bmstore(repo)
         for mark, value in store.iteritems():
             oldvalue = oldmarks.get(mark, node.nullid)
             if value != oldvalue:
                 repo.journal.record(bookmarktype, mark, oldvalue, value)
     return orig(store, fp)

+# shared repository support
+def _readsharedfeatures(repo):
+    """A set of shared features for this repository"""
+    try:
+        return set(repo.vfs.read('shared').splitlines())
+    except IOError as inst:
+        if inst.errno != errno.ENOENT:
+            raise
+        return set()
+
+def _mergeentriesiter(*iterables, **kwargs):
+    """Given a set of sorted iterables, yield the next entry in merged order
+
+    Note that by default entries go from most recent to oldest.
+    """
+    order = kwargs.pop('order', max)
+    iterables = [iter(it) for it in iterables]
+    # this tracks still active iterables; iterables are deleted as they are
+    # exhausted, which is why this is a dictionary and why each entry also
+    # stores the key. Entries are mutable so we can store the next value each
+    # time.
+    iterable_map = {}
+    for key, it in enumerate(iterables):
+        try:
+            iterable_map[key] = [next(it), key, it]
+        except StopIteration:
+            # empty entry, can be ignored
+            pass
+
+    while iterable_map:
+        value, key, it = order(iterable_map.itervalues())
+        yield value
+        try:
+            iterable_map[key][0] = next(it)
+        except StopIteration:
+            # this iterable is empty, remove it from consideration
+            del iterable_map[key]
+
+def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
+    """Mark this shared working copy as sharing journal information"""
+    orig(sourcerepo, destrepo, **kwargs)
+    with destrepo.vfs('shared', 'a') as fp:
+        fp.write('journal\n')
+
+def unsharejournal(orig, ui, repo, repopath):
+    """Copy shared journal entries into this repo when unsharing"""
+    if (repo.path == repopath and repo.shared() and
+            util.safehasattr(repo, 'journal')):
+        sharedrepo = share._getsrcrepo(repo)
+        sharedfeatures = _readsharedfeatures(repo)
+        if sharedrepo and sharedfeatures > set(['journal']):
+            # there is a shared repository and there are shared journal entries
+            # to copy. move shared date over from source to destination but
+            # move the local file first
+            if repo.vfs.exists('journal'):
+                journalpath = repo.join('journal')
+                util.rename(journalpath, journalpath + '.bak')
+            storage = repo.journal
+            local = storage._open(
+                repo.vfs, filename='journal.bak', _newestfirst=False)
+            shared = (
+                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
+                if sharednamespaces.get(e.namespace) in sharedfeatures)
+            for entry in _mergeentriesiter(local, shared, order=min):
+                storage._write(repo.vfs, entry)
+
+    return orig(ui, repo, repopath)
+
 class journalentry(collections.namedtuple(
         'journalentry',
         'timestamp user command namespace name oldhashes newhashes')):
     """Individual journal entry

     * timestamp: a mercurial (time, timezone) tuple
     * user: the username that ran the command
     * namespace: the entry namespace, an opaque string
     * name: the name of the changed item, opaque string with meaning in the
       namespace
     * command: the hg command that triggered this record
     * oldhashes: a tuple of one or more binary hashes for the old location
     * newhashes: a tuple of one or more binary hashes for the new location

     Handles serialisation from and to the storage format. Fields are
     separated by newlines, hashes are written out in hex separated by commas,
     timestamp and timezone are separated by a space.

     """
     @classmethod
     def fromstorage(cls, line):
         (time, user, command, namespace, name,
          oldhashes, newhashes) = line.split('\n')
         timestamp, tz = time.split()
         timestamp, tz = float(timestamp), int(tz)
         oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
         newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
         return cls(
             (timestamp, tz), user, command, namespace, name,
             oldhashes, newhashes)

     def __str__(self):
         """String representation for storage"""
         time = ' '.join(map(str, self.timestamp))
         oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
         newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
         return '\n'.join((
             time, self.user, self.command, self.namespace, self.name,
             oldhashes, newhashes))

 class journalstorage(object):
     """Storage for journal entries

+    Entries are divided over two files; one with entries that pertain to the
+    local working copy *only*, and one with entries that are shared across
+    multiple working copies when shared using the share extension.
+
     Entries are stored with NUL bytes as separators. See the journalentry
     class for the per-entry structure.

     The file format starts with an integer version, delimited by a NUL.

     This storage uses a dedicated lock; this makes it easier to avoid issues
     with adding entries that added when the regular wlock is unlocked (e.g.
     the dirstate).

     """
     _currentcommand = ()
     _lockref = None

     def __init__(self, repo):
         self.user = util.getuser()
         self.ui = repo.ui
         self.vfs = repo.vfs

+        # is this working copy using a shared storage?
+        self.sharedfeatures = self.sharedvfs = None
+        if repo.shared():
+            features = _readsharedfeatures(repo)
+            sharedrepo = share._getsrcrepo(repo)
+            if sharedrepo is not None and 'journal' in features:
+                self.sharedvfs = sharedrepo.vfs
+                self.sharedfeatures = features
+
     # track the current command for recording in journal entries
     @property
     def command(self):
         commandstr = ' '.join(
             map(util.shellquote, journalstorage._currentcommand))
         if '\n' in commandstr:
             # truncate multi-line commands
             commandstr = commandstr.partition('\n')[0] + ' ...'
         return commandstr

     @classmethod
     def recordcommand(cls, *fullargs):
         """Set the current hg arguments, stored with recorded entries"""
         # Set the current command on the class because we may have started
         # with a non-local repo (cloning for example).
         cls._currentcommand = fullargs

-    def jlock(self):
+    def jlock(self, vfs):
         """Create a lock for the journal file"""
         if self._lockref and self._lockref():
             raise error.Abort(_('journal lock does not support nesting'))
-        desc = _('journal of %s') % self.vfs.base
+        desc = _('journal of %s') % vfs.base
         try:
-            l = lock.lock(self.vfs, 'journal.lock', 0, desc=desc)
+            l = lock.lock(vfs, 'journal.lock', 0, desc=desc)
         except error.LockHeld as inst:
             self.ui.warn(
                 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(
-                self.vfs, 'journal.lock',
+                vfs, 'journal.lock',
                 int(self.ui.config("ui", "timeout", "600")), desc=desc)
             self.ui.warn(_("got lock after %s seconds\n") % l.delay)
         self._lockref = weakref.ref(l)
         return l

     def record(self, namespace, name, oldhashes, newhashes):
         """Record a new journal entry

         * namespace: an opaque string; this can be used to filter on the type
           of recorded entries.
         * name: the name defining this entry; for bookmarks, this is the
           bookmark name. Can be filtered on when retrieving entries.
         * oldhashes and newhashes: each a single binary hash, or a list of
           binary hashes. These represent the old and new position of the named
           item.

         """
         if not isinstance(oldhashes, list):
             oldhashes = [oldhashes]
         if not isinstance(newhashes, list):
             newhashes = [newhashes]

         entry = journalentry(
             util.makedate(), self.user, self.command, namespace, name,
             oldhashes, newhashes)

-        with self.jlock():
+        vfs = self.vfs
+        if self.sharedvfs is not None:
+            # write to the shared repository if this feature is being
+            # shared between working copies.
+            if sharednamespaces.get(namespace) in self.sharedfeatures:
+                vfs = self.sharedvfs
+
+        self._write(vfs, entry)
+
+    def _write(self, vfs, entry):
+        with self.jlock(vfs):
             version = None
             # open file in amend mode to ensure it is created if missing
-            with self.vfs('journal', mode='a+b', atomictemp=True) as f:
+            with vfs('journal', mode='a+b', atomictemp=True) as f:
                 f.seek(0, os.SEEK_SET)
                 # Read just enough bytes to get a version number (up to 2
                 # digits plus separator)
                 version = f.read(3).partition('\0')[0]
                 if version and version != str(storageversion):
                     # different version of the storage. Exit early (and not
                     # write anything) if this is not a version we can handle or
                     # the file is corrupt. In future, perhaps rotate the file
                     # instead?
                     self.ui.warn(
                         _("unsupported journal file version '%s'\n") % version)
                     return
                 if not version:
                     # empty file, write version first
                     f.write(str(storageversion) + '\0')
                 f.seek(0, os.SEEK_END)
                 f.write(str(entry) + '\0')

     def filtered(self, namespace=None, name=None):
         """Yield all journal entries with the given namespace or name

         Both the namespace and the name are optional; if neither is given all
         entries in the journal are produced.

         """
         for entry in self:
             if namespace is not None and entry.namespace != namespace:
                 continue
             if name is not None and entry.name != name:
                 continue
             yield entry

     def __iter__(self):
         """Iterate over the storage

         Yields journalentry instances for each contained journal record.

         """
-        if not self.vfs.exists('journal'):
+        local = self._open(self.vfs)
+
+        if self.sharedvfs is None:
+            return local
+
+        # iterate over both local and shared entries, but only those
+        # shared entries that are among the currently shared features
+        shared = (
+            e for e in self._open(self.sharedvfs)
+            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
+        return _mergeentriesiter(local, shared)
+
+    def _open(self, vfs, filename='journal', _newestfirst=True):
+        if not vfs.exists(filename):
             return

-        with self.vfs('journal') as f:
+        with vfs(filename) as f:
             raw = f.read()

         lines = raw.split('\0')
         version = lines and lines[0]
         if version != str(storageversion):
             version = version or _('not available')
             raise error.Abort(_("unknown journal file version '%s'") % version)

-        # Skip the first line, it's a version number.
-        lines = reversed(lines[1:])
+        # Skip the first line, it's a version number. Normally we iterate over
+        # these in reverse order to list newest first; only when copying across
+        # a shared storage do we forgo reversing.
+        lines = lines[1:]
+        if _newestfirst:
+            lines = reversed(lines)
         for line in lines:
             if not line:
                 continue
             yield journalentry.fromstorage(line)

 # journal reading
 # log options that don't make sense for journal
 _ignoreopts = ('no-merges', 'graph')
 @command(
     'journal', [
         ('', 'all', None, 'show history for all names'),
         ('c', 'commits', None, 'show commit metadata'),
     ] + [opt for opt in commands.logopts if opt[1] not in _ignoreopts],
     '[OPTION]... [BOOKMARKNAME]')
 def journal(ui, repo, *args, **opts):
     """show the previous position of bookmarks and the working copy

     The journal is used to see the previous commits that bookmarks and the
     working copy pointed to. By default the previous locations for the working
     copy. Passing a bookmark name will show all the previous positions of
     that bookmark. Use the --all switch to show previous locations for all
     bookmarks and the working copy; each line will then include the bookmark
     name, or '.' for the working copy, as well.

     By default hg journal only shows the commit hash and the command that was
     running at that time. -v/--verbose will show the prior hash, the user, and
     the time at which it happened.

     Use -c/--commits to output log information on each commit hash; at this
     point you can use the usual `--patch`, `--git`, `--stat` and `--template`
     switches to alter the log output for these.

     `hg journal -T json` can be used to produce machine readable output.

     """
     name = '.'
     if opts.get('all'):
         if args:
             raise error.Abort(
                 _("You can't combine --all and filtering on a name"))
         name = None
     if args:
         name = args[0]

     fm = ui.formatter('journal', opts)

     if opts.get("template") != "json":
         if name is None:
             displayname = _('the working copy and bookmarks')
         else:
             displayname = "'%s'" % name
         ui.status(_("previous locations of %s:\n") % displayname)

     limit = cmdutil.loglimit(opts)
     entry = None
     for count, entry in enumerate(repo.journal.filtered(name=name)):
         if count == limit:
             break
         newhashesstr = ','.join([node.short(hash) for hash in entry.newhashes])
         oldhashesstr = ','.join([node.short(hash) for hash in entry.oldhashes])

         fm.startitem()
         fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
         fm.write('newhashes', '%s', newhashesstr)
         fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
         fm.condwrite(opts.get('all'), 'name', ' %-8s', entry.name)

         timestring = util.datestr(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
         fm.condwrite(ui.verbose, 'date', ' %s', timestring)
         fm.write('command', ' %s\n', entry.command)

         if opts.get("commits"):
             displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
             for hash in entry.newhashes:
                 try:
                     ctx = repo[hash]
                     displayer.show(ctx)
                 except error.RepoLookupError as e:
                     fm.write('repolookuperror', "%s\n\n", str(e))
             displayer.close()

     fm.end()

     if entry is None:
         ui.status(_("no recorded locations\n"))
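The `_mergeentriesiter` helper above is the heart of the shared/local split: `__iter__` merges the two already-sorted journal files into one stream, newest first, and `unsharejournal` reuses it with `order=min` to rebuild a single file oldest first. Here is a rough standalone illustration of the same idea, using plain `(timestamp, command)` tuples instead of `journalentry` objects; `mergeentries` and the sample data are made up for this sketch and are not part of the extension.

def mergeentries(*iterables, **kwargs):
    order = kwargs.pop('order', max)          # max == newest entry wins
    iterators = {key: iter(it) for key, it in enumerate(iterables)}
    heads = {}
    for key, it in iterators.items():
        try:
            heads[key] = next(it)             # current head of each journal
        except StopIteration:
            pass                              # empty journal, ignore
    while heads:
        key = order(heads, key=heads.get)     # pick the winning head entry
        yield heads[key]
        try:
            heads[key] = next(iterators[key]) # advance only that journal
        except StopIteration:
            del heads[key]                    # exhausted, drop it

local = [((5.0, 0), 'up tip'), ((2.0, 0), 'up 0')]              # newest first
shared = [((4.0, 0), 'bookmark bm'), ((1.0, 0), 'commit -m x')] # newest first
print([cmd for ts, cmd in mergeentries(local, shared)])
# ['up tip', 'bookmark bm', 'up 0', 'commit -m x']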