--- a/mercurial/bookmarks.py
+++ b/mercurial/bookmarks.py
@@ -1,310 +1,298 @@
# Mercurial bookmark support code
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from mercurial.i18n import _
from mercurial.node import hex
from mercurial import encoding, error, util, obsolete
import errno, os

class bmstore(dict):
    """Storage for bookmarks.

    This object should do all bookmark reads and writes, so that it's
    fairly simple to replace the storage underlying bookmarks without
    having to clone the logic surrounding bookmarks.

    This particular bmstore implementation stores bookmarks as
    {hash}\s{name}\n (the same format as localtags) in
    .hg/bookmarks. The mapping is stored as {name: nodeid}.

    This class does NOT handle the "current" bookmark state at this
    time.
    """

    def __init__(self, repo):
        dict.__init__(self)
        self._repo = repo
        try:
            for line in repo.vfs('bookmarks'):
                line = line.strip()
                if not line:
                    continue
                if ' ' not in line:
                    repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
                                 % line)
                    continue
                sha, refspec = line.split(' ', 1)
                refspec = encoding.tolocal(refspec)
                try:
                    self[refspec] = repo.changelog.lookup(sha)
                except LookupError:
                    pass
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

    def write(self):
        '''Write bookmarks

        Write the given bookmark => hash dictionary to the .hg/bookmarks file
        in a format equal to that of localtags.

        We also store a backup of the previous state in undo.bookmarks that
        can be copied back on rollback.
        '''
        repo = self._repo
        if repo._bookmarkcurrent not in self:
            setcurrent(repo, None)

        wlock = repo.wlock()
        try:

            file = repo.vfs('bookmarks', 'w', atomictemp=True)
            for name, node in self.iteritems():
                file.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
            file.close()

            # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
            try:
                os.utime(repo.sjoin('00changelog.i'), None)
            except OSError:
                pass

        finally:
            wlock.release()

def readcurrent(repo):
    '''Get the current bookmark

    If we use gittish branches we have a current bookmark that
    we are on. This function returns the name of the bookmark. It
    is stored in .hg/bookmarks.current
    '''
    mark = None
    try:
        file = repo.opener('bookmarks.current')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    try:
        # No readline() in osutil.posixfile, reading everything is cheap
        mark = encoding.tolocal((file.readlines() or [''])[0])
        if mark == '' or mark not in repo._bookmarks:
            mark = None
    finally:
        file.close()
    return mark

def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    current = repo._bookmarkcurrent
    if current == mark:
        return

    if mark not in repo._bookmarks:
        mark = ''

    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
        file.write(encoding.fromlocal(mark))
        file.close()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark

def unsetcurrent(repo):
    wlock = repo.wlock()
    try:
        try:
            util.unlink(repo.join('bookmarks.current'))
            repo._bookmarkcurrent = None
        except OSError, inst:
            if inst.errno != errno.ENOENT:
                raise
    finally:
        wlock.release()

def iscurrent(repo, mark=None, parents=None):
    '''Tell whether the current bookmark is also active

    I.e., the bookmark listed in .hg/bookmarks.current also points to a
    parent of the working directory.
    '''
    if not mark:
        mark = repo._bookmarkcurrent
    if not parents:
        parents = [p.node() for p in repo[None].parents()]
    marks = repo._bookmarks
    return (mark in marks and marks[mark] in parents)

def updatecurrentbookmark(repo, oldnode, curbranch):
    try:
        return update(repo, oldnode, repo.branchtip(curbranch))
    except error.RepoLookupError:
        if curbranch == "default": # no default branch!
            return update(repo, oldnode, repo.lookup("tip"))
        else:
            raise util.Abort(_("branch %s not found") % curbranch)

def deletedivergent(repo, deletefrom, bm):
    '''Delete divergent versions of bm on nodes in deletefrom.

    Return True if at least one bookmark was deleted, False otherwise.'''
    deleted = False
    marks = repo._bookmarks
    divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
    for mark in divergent:
        if mark and marks[mark] in deletefrom:
            if mark != bm:
                del marks[mark]
                deleted = True
    return deleted

def update(repo, parents, node):
    marks = repo._bookmarks
    update = False
    cur = repo._bookmarkcurrent
    if not cur:
        return False

    if marks[cur] in parents:
        old = repo[marks[cur]]
        new = repo[node]
        if old.descendant(new):
            marks[cur] = new.node()
            update = True

    if deletedivergent(repo, parents, cur):
        update = True

    if update:
        marks.write()
    return update

def listbookmarks(repo):
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    d = {}
    hasnode = repo.changelog.hasnode
    for k, v in marks.iteritems():
        # don't expose local divergent bookmarks
        if hasnode(v) and ('@' not in k or k.endswith('@')):
            d[k] = hex(v)
    return d

def pushbookmark(repo, key, old, new):
    w = repo.wlock()
    try:
        marks = repo._bookmarks
        if hex(marks.get(key, '')) != old:
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        marks.write()
        return True
    finally:
        w.release()

def updatefromremote(ui, repo, remotemarks, path):
    ui.debug("checking for updated bookmarks\n")
    changed = False
    localmarks = repo._bookmarks
    for k in sorted(remotemarks):
        if k in localmarks:
            nr, nl = remotemarks[k], localmarks[k]
            if nr in repo:
                cr = repo[nr]
                cl = repo[nl]
                if cl.rev() >= cr.rev():
                    continue
                if validdest(repo, cl, cr):
                    localmarks[k] = cr.node()
                    changed = True
                    ui.status(_("updating bookmark %s\n") % k)
                else:
                    if k == '@':
                        kd = ''
                    else:
                        kd = k
                    # find a unique @ suffix
                    for x in range(1, 100):
                        n = '%s@%d' % (kd, x)
                        if n not in localmarks:
                            break
                    # try to use an @pathalias suffix
                    # if an @pathalias already exists, we overwrite (update) it
                    for p, u in ui.configitems("paths"):
                        if path == u:
                            n = '%s@%s' % (kd, p)

                    localmarks[n] = cr.node()
                    changed = True
                    ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
        elif remotemarks[k] in repo:
            # add remote bookmarks for changes we already have
            localmarks[k] = repo[remotemarks[k]].node()
            changed = True
            ui.status(_("adding remote bookmark %s\n") % k)

    if changed:
        localmarks.write()

def diff(ui, dst, src):
    ui.status(_("searching for changed bookmarks\n"))

    smarks = src.listkeys('bookmarks')
    dmarks = dst.listkeys('bookmarks')

    diff = sorted(set(smarks) - set(dmarks))
    for k in diff:
        mark = ui.debugflag and smarks[k] or smarks[k][:12]
        ui.write("   %-25s %s\n" % (k, mark))

    if len(diff) <= 0:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0

def validdest(repo, old, new):
    """Is the new bookmark destination a valid update from the old one"""
    repo = repo.unfiltered()
    if old == new:
        # Old == new -> nothing to update.
        return False
    elif not old:
        # old is nullrev, anything is valid.
        # (new != nullrev has been excluded by the previous check)
        return True
    elif repo.obsstore:
-        # We only need this complicated logic if there is obsolescence
-        # XXX will probably deserve an optimised revset.
-        nm = repo.changelog.nodemap
-        validdests = set([old])
-        plen = -1
-        # compute the whole set of successors or descendants
-        while len(validdests) != plen:
-            plen = len(validdests)
-            succs = set(c.node() for c in validdests)
-            mutable = [c.node() for c in validdests if c.mutable()]
-            succs.update(obsolete.allsuccessors(repo.obsstore, mutable))
-            known = (n for n in succs if n in nm)
-            validdests = set(repo.set('%ln::', known))
-        return new in validdests
+        return new.node() in obsolete.foreground(repo, [old.node()])
    else:
+        # still an independent clause as it is lazier (and therefore faster)
        return old.descendant(new)
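The hunk above is the heart of this patch: validdest keeps its fast paths but delegates the obsolescence-aware case to the new obsolete.foreground() helper introduced in the second file below, instead of running its own copy of the fixpoint loop. That loop grows a set from the old bookmark target by repeatedly following descendant and obsolescence-successor edges until the set stops growing; the move is valid when the new target lands in that set. Here is a minimal standalone sketch of the same fixpoint over a toy graph, where toy_foreground, children, and successors are names invented for illustration (plain dicts, not Mercurial's repo API):

def toy_foreground(nodes, children, successors):
    # Smallest set containing `nodes` that is closed under both edge
    # types; mirrors the while-loop fixpoint in obsolete.foreground().
    fg = set(nodes)
    plen = -1
    while len(fg) != plen:  # iterate until the set stops growing
        plen = len(fg)
        for n in list(fg):
            fg.update(children.get(n, ()))    # parent -> children edges
            fg.update(successors.get(n, ()))  # precursor -> successor edges
    return fg

# 'a' was rewritten as 'a2', which then grew a child 'b', so a bookmark
# on 'a' may validly move to 'a2' or 'b':
assert toy_foreground(['a'], {'a2': ['b']}, {'a': ['a2']}) == {'a', 'a2', 'b'}

The real helper additionally restricts successor-following to mutable changesets and to nodes known locally, which this sketch leaves out.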
--- a/mercurial/obsolete.py
+++ b/mercurial/obsolete.py
@@ -1,755 +1,782 @@
# obsolete.py - obsolete markers handling
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Obsolete markers handling

An obsolete marker maps an old changeset to a list of new
changesets. If the list of new changesets is empty, the old changeset
is said to be "killed". Otherwise, the old changeset is being
"replaced" by the new changesets.

Obsolete markers can be used to record and distribute changeset graph
transformations performed by history rewriting operations, and help
build new tools to reconcile conflicting rewriting actions. To
facilitate conflict resolution, markers include various annotations
besides old and new changeset identifiers, such as creation date or
author name.

The old obsoleted changeset is called the "precursor" and possible replacements
are called "successors". Markers that use changeset X as a precursor are called
"successor markers of X" because they hold information about the successors of
X. Markers that use changeset Y as a successor are called "precursor markers of
Y" because they hold information about the precursors of Y.

Examples:

- When changeset A is replaced by a changeset A', one marker is stored:

    (A, (A'))

- When changesets A and B are folded into a new changeset C, two markers are
  stored:

    (A, (C,)) and (B, (C,))

- When changeset A is simply "pruned" from the graph, a marker is created:

    (A, ())

- When changeset A is split into B and C, a single marker is used:

    (A, (B, C))

  We use a single marker to distinguish the "split" case from the "divergence"
  case. If two independent operations rewrite the same changeset A into A' and
  A'', we have an error case: divergent rewriting. We can detect it because
  two markers will be created independently:

    (A, (B,)) and (A, (C,))

Format
------

Markers are stored in an append-only file stored in
'.hg/store/obsstore'.

The file starts with a version header:

- 1 unsigned byte: version number, starting at zero.


The header is followed by the markers. Each marker is made of:

- 1 unsigned byte: number of new changesets "N", can be zero.

- 1 unsigned 32-bit integer: metadata size "M" in bytes.

- 1 byte: a bit field. It is reserved for flags used in obsolete
  markers common operations, to avoid repeated decoding of metadata
  entries.

- 20 bytes: obsoleted changeset identifier.

- N*20 bytes: new changesets identifiers.

- M bytes: metadata as a sequence of nul-terminated strings. Each
  string contains a key and a value, separated by a colon ':', without
  additional encoding. Keys cannot contain '\0' or ':' and values
  cannot contain '\0'.
"""
import struct
import util, base85, node
from i18n import _

_pack = struct.pack
_unpack = struct.unpack

_SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5

# the obsolete feature is not mature enough to be enabled by default.
# you have to rely on third party extensions to enable this.
_enabled = False

# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

### obsolescence marker flag

## bumpedfix flag
#
# When a changeset A' succeeds a changeset A which became public, we call A'
# "bumped" because it's a successor of a public changeset
#
# o    A' (bumped)
# |`:
# | o  A
# |/
# o    Z
#
# The way to solve this situation is to create a new changeset Ad as a child
# of A. This changeset has the same content as A'. So the diff from A to A'
# is the same as the diff from A to Ad. Ad is marked as a successor of A'
#
# o   Ad
# |`:
# | x A'
# |'|
# o | A
# |/
# o   Z
#
# But by transitivity Ad is also a successor of A. To avoid having Ad marked
# as bumped too, we add the `bumpedfix` flag to the marker: <A', (Ad,)>.
# This flag means that the successor expresses the changes between the public
# and bumped version and fixes the situation, breaking the transitivity of
# "bumped" here.
bumpedfix = 1

def _readmarkers(data):
    """Read and enumerate markers from raw data"""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value."""
    for key, value in meta.iteritems():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key")
        if '\0' in value:
            raise ValueError("'\0' is forbidden in metadata value")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d

class marker(object):
    """Wrap obsolete marker raw data"""

    def __init__(self, repo, data):
        # the repo argument will be used to create changectx in a later version
        self._repo = repo
        self._data = data
        self._decodedmeta = None

    def precnode(self):
        """Precursor changeset node identifier"""
        return self._data[0]

    def succnodes(self):
        """List of successor changesets node identifiers"""
        return self._data[1]

    def metadata(self):
        """Decoded metadata dictionary"""
        if self._decodedmeta is None:
            self._decodedmeta = decodemeta(self._data[3])
        return self._decodedmeta

    def date(self):
        """Creation date as (unixtime, offset)"""
        parts = self.metadata()['date'].split(' ')
        return (float(parts[0]), int(parts[1]))

class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related cache
        self.caches = {}
        self._all = []
        # new markers to serialize
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata
        """
        if metadata is None:
            metadata = {}
        if 'date' not in metadata:
            metadata['date'] = "%d %d" % util.makedate()
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        new = [m for m in markers if m not in self._all]
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the begin or at
                # the end after opening a file for appending is implementation
                # defined. So we must seek to the end before calling tell(),
                # or we may get a zero offset for non-zero sized files on
                # some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore rebuilt.
                # call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the cache.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

# arbitrarily picked to fit into the 8K limit from the HTTP server
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    currentlen = _maxpayload * 2 # ensure we create a new part
    for marker in repo.obsstore:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value'))
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete marker marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete marker making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, nodes, ignoreflags=0):
    """Yield node for every successor of <nodes>.

    Some successors may be unknown locally.

    This is a linear yield unsuited to detecting split changesets."""
    remaining = set(nodes)
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            # ignore marker flagged with the specified flag
            if mark[2] & ignoreflags:
                continue
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

+def foreground(repo, nodes):
+    """return all nodes in the "foreground" of the given nodes
+
+    The foreground of a revision is anything reachable using the parent ->
+    children or precursor -> successor relations. It is very similar to
+    "descendants" but augmented with obsolescence information.
+
+    Beware that obsolescence cycles may arise in complex situations.
+    """
+    repo = repo.unfiltered()
+    foreground = set(repo.set('%ln::', nodes))
+    if repo.obsstore:
+        # We only need this complicated logic if there is obsolescence
+        # XXX will probably deserve an optimised revset.
+        nm = repo.changelog.nodemap
+        plen = -1
+        # compute the whole set of successors or descendants
+        while len(foreground) != plen:
+            plen = len(foreground)
+            succs = set(c.node() for c in foreground)
+            mutable = [c.node() for c in foreground if c.mutable()]
+            succs.update(allsuccessors(repo.obsstore, mutable))
+            known = (n for n in succs if n in nm)
+            foreground = set(repo.set('%ln::', known))
+    return set(c.node() for c in foreground)
+
+
408 | def successorssets(repo, initialnode, cache=None): |
|
435 | def successorssets(repo, initialnode, cache=None): | |
409 | """Return all set of successors of initial nodes |
|
436 | """Return all set of successors of initial nodes | |
410 |
|
437 | |||
411 | Successors set of changeset A are a group of revision that succeed A. It |
|
438 | Successors set of changeset A are a group of revision that succeed A. It | |
412 | succeed A as a consistent whole, each revision being only partial |
|
439 | succeed A as a consistent whole, each revision being only partial | |
413 | replacement. Successors set contains non-obsolete changeset only. |
|
440 | replacement. Successors set contains non-obsolete changeset only. | |
414 |
|
441 | |||
415 | In most cases a changeset A have zero (changeset pruned) or a single |
|
442 | In most cases a changeset A have zero (changeset pruned) or a single | |
416 | successors set that contains a single successor (changeset A replaced by |
|
443 | successors set that contains a single successor (changeset A replaced by | |
417 | A') |
|
444 | A') | |
418 |
|
445 | |||
419 | When changeset is split, it results successors set containing more than |
|
446 | When changeset is split, it results successors set containing more than | |
420 | a single element. Divergent rewriting will result in multiple successors |
|
447 | a single element. Divergent rewriting will result in multiple successors | |
421 | sets. |
|
448 | sets. | |
422 |
|
449 | |||
423 | They are returned as a list of tuples containing all valid successors sets. |
|
450 | They are returned as a list of tuples containing all valid successors sets. | |
424 |
|
451 | |||
425 | Final successors unknown locally are considered plain prune (obsoleted |
|
452 | Final successors unknown locally are considered plain prune (obsoleted | |
426 | without successors). |
|
453 | without successors). | |
427 |
|
454 | |||
428 | The optional `cache` parameter is a dictionary that may contains |
|
455 | The optional `cache` parameter is a dictionary that may contains | |
429 | precomputed successors sets. It is meant to reuse the computation of |
|
456 | precomputed successors sets. It is meant to reuse the computation of | |
430 | previous call to `successorssets` when multiple calls are made at the same |
|
457 | previous call to `successorssets` when multiple calls are made at the same | |
431 | time. The cache dictionary is updated in place. The caller is responsible |
|
458 | time. The cache dictionary is updated in place. The caller is responsible | |
432 | for its live spawn. Code that makes multiple calls to `successorssets` |
|
459 | for its live spawn. Code that makes multiple calls to `successorssets` | |
433 | *must* use this cache mechanism or suffer terrible performances.""" |
|
460 | *must* use this cache mechanism or suffer terrible performances.""" | |
434 |
|
461 | |||
435 | succmarkers = repo.obsstore.successors |
|
462 | succmarkers = repo.obsstore.successors | |
436 |
|
463 | |||
437 | # Stack of nodes we search successors sets for |
|
464 | # Stack of nodes we search successors sets for | |
438 | toproceed = [initialnode] |
|
465 | toproceed = [initialnode] | |
439 | # set version of above list for fast loop detection |
|
466 | # set version of above list for fast loop detection | |
440 | # element added to "toproceed" must be added here |
|
467 | # element added to "toproceed" must be added here | |
441 | stackedset = set(toproceed) |
|
468 | stackedset = set(toproceed) | |
442 | if cache is None: |
|
469 | if cache is None: | |
443 | cache = {} |
|
470 | cache = {} | |
444 |
|
471 | |||
    # This while loop is the flattened version of a recursive search for
    # successors sets
    #
    # def successorssets(x):
    #     successors = directsuccessors(x)
    #     ss = [[]]
    #     for succ in directsuccessors(x):
    #         # product as in itertools cartesian product
    #         ss = product(ss, successorssets(succ))
    #     return ss
    #
    # But we can not use plain recursive calls here:
    # - that would blow the python call stack
    # - obsolescence markers may have cycles, we need to handle them.
    #
    # The `toproceed` list acts as our call stack. Every node we search
    # successors sets for is stacked there.
    #
    # The `stackedset` is a set version of this stack used to check if a node
    # is already stacked. This check is used to detect cycles and prevent
    # infinite loops.
    #
    # The successors sets of all nodes are stored in the `cache` dictionary.
    #
    # After this while loop ends we use the cache to return the successors
    # sets for the node requested by the caller.
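    # A rough sketch (an assumption, not code from this module) of the
    # `product` step named in the pseudocode above, ignoring the
    # duplicate-entry handling done by the real loop below:
    #
    #     import itertools
    #     def product(ss, sucssets):
    #         # [[A], [B]] x [[C]] -> [[A, C], [B, C]]
    #         return [list(a) + list(b)
    #                 for a, b in itertools.product(ss, sucssets)]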
    while toproceed:
        # Every iteration tries to compute the successors sets of the topmost
        # node of the stack: CURRENT.
        #
        # There are four possible outcomes:
        #
        # 1) We already know the successors sets of CURRENT:
        #    -> mission accomplished, pop it from the stack.
        # 2) Node is not obsolete:
        #    -> the node is its own successors set. Add it to the cache.
        # 3) We do not know the successors sets of the direct successors of
        #    CURRENT:
        #    -> We add those successors to the stack.
        # 4) We know the successors sets of all direct successors of CURRENT:
        #    -> We can compute CURRENT's successors set and add it to the
        #       cache.
        #
        current = toproceed[-1]
        if current in cache:
            # case (1): We already know the successors sets
            stackedset.remove(toproceed.pop())
        elif current not in succmarkers:
            # case (2): The node is not obsolete.
            if current in repo:
                # We have a valid final successor.
                cache[current] = [(current,)]
            else:
                # Final obsolete version is unknown locally.
                # Do not count that as a valid successor.
                cache[current] = []
        else:
            # cases (3) and (4)
            #
            # We proceed in two phases. Phase 1 aims to distinguish case (3)
            # from case (4):
            #
            # For each direct successor of CURRENT, we check whether its
            # successors sets are known. If they are not, we stack the
            # unknown node and proceed to the next iteration of the while
            # loop. (case 3)
            #
            # During this step, we may detect obsolescence cycles: a node
            # with unknown successors sets but already in the call stack.
            # In such a situation, we arbitrarily set the successors sets of
            # the node to nothing (node pruned) to break the cycle.
            #
            # If no break was encountered we proceed to phase 2.
            #
            # Phase 2 computes the successors sets of CURRENT (case 4); see
            # details in phase 2 itself.
            #
            # Note the two levels of iteration in each phase.
            # - The first one handles obsolescence markers using CURRENT as
            #   precursor (successors markers of CURRENT).
            #
            #   Having multiple entries here means divergence.
            #
            # - The second one handles successors defined in each marker.
            #
            #   Having none means a pruned node, multiple successors means a
            #   split, a single successor is a standard replacement.
            #
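            # For illustration (an assumed shorthand, not from this module),
            # with markers shaped like (precursor, successors, ...):
            #
            #     (A, ())          -> prune: contributes no successors
            #     (A, (B,))        -> replacement: a single successor
            #     (A, (B, C))      -> split: several successors in one set
            #     two markers on A -> divergence: several successors sets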
            for mark in sorted(succmarkers[current]):
                for suc in mark[1]:
                    if suc not in cache:
                        if suc in stackedset:
                            # cycle breaking
                            cache[suc] = []
                        else:
                            # case (3) If we have not computed the successors
                            # sets of one of those successors, we add it to
                            # the `toproceed` stack and stop all work for
                            # this iteration.
                            toproceed.append(suc)
                            stackedset.add(suc)
                            break
                else:
                    continue
                break
            else:
                # case (4): we know all successors sets of all direct
                # successors
                #
                # The successors set contributed by each marker depends on
                # the successors sets of all its "successors" nodes.
                #
                # Each different marker is a divergence in the obsolescence
                # history. It contributes successors sets distinct from other
                # markers.
                #
                # Within a marker, a successor may have divergent successors
                # sets. In such a case, the marker will contribute multiple
                # divergent successors sets. If multiple successors have
                # divergent successors sets, a cartesian product is used.
                #
                # At the end we post-process successors sets to remove
                # duplicated entries and successors sets that are strict
                # subsets of another one.
                succssets = []
                for mark in sorted(succmarkers[current]):
                    # successors sets contributed by this marker
                    markss = [[]]
                    for suc in mark[1]:
                        # cartesian product with previous successors
                        productresult = []
                        for prefix in markss:
                            for suffix in cache[suc]:
                                newss = list(prefix)
                                for part in suffix:
                                    # do not duplicate entries in a
                                    # successors set; first entry wins.
                                    if part not in newss:
                                        newss.append(part)
                                productresult.append(newss)
                        markss = productresult
                    succssets.extend(markss)
                # remove duplicates and subsets
                seen = []
                final = []
                candidate = sorted(((set(s), s) for s in succssets if s),
                                   key=lambda x: len(x[1]), reverse=True)
                for setversion, listversion in candidate:
                    for seenset in seen:
                        if setversion.issubset(seenset):
                            break
                    else:
                        final.append(listversion)
                        seen.append(setversion)
                final.reverse() # put small successors sets first
                cache[current] = final
    return cache[initialnode]
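# Worked example (illustrative only) of the post-processing above: with
# succssets = [['A'], ['A', 'B'], ['A']], the candidates are scanned from
# the largest set down, so ['A', 'B'] is kept and both ['A'] entries are
# dropped as strict subsets, leaving final = [['A', 'B']].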

def _knownrevs(repo, nodes):
    """yield the revision numbers of the passed-in nodes that are known

    Unknown revisions are silently ignored."""
    torev = repo.changelog.nodemap.get
    for n in nodes:
        rev = torev(n)
        if rev is not None:
            yield rev

# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    repo = repo.unfiltered()
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]
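# Hypothetical usage sketch (assumed caller-side names, not from this
# module): membership tests against a named set, computing and caching it
# on first access:
#
#     if ctx.rev() in getrevs(repo, 'obsolete'):
#         ui.warn('working on an obsolete changeset\n')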

# To keep things simple we need to invalidate obsolescence caches when:
#
# - a new changeset is added
# - the public phase is changed
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence-related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the cache
    clearing)"""
    # only clear the caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    getrev = repo.changelog.nodemap.get
    getphase = repo._phasecache.phase
    for node in repo.obsstore.successors:
        rev = getrev(node)
        if rev is not None and getphase(repo, rev):
            obs.add(rev)
    return obs

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with obsolete parents"""
    # revset is not efficient enough here
    # we do (obsolete()::) - obsolete() by hand
    obs = getrevs(repo, 'obsolete')
    if not obs:
        return set()
    cl = repo.changelog
    return set(r for r in cl.descendants(obs) if r not in obs)
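# Illustrative note (an assumption, not from this module): the hand-rolled
# walk above computes the same set as the revset named in the comment,
# roughly:
#
#     unstable = set(repo.revs('(obsolete()::) - obsolete()'))
#
# but iterating changelog descendants directly avoids the revset overhead.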

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete parents with non obsolete descendants"""
    suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
    return set(r for r in getrevs(repo, 'obsolete') if r in suspended)

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete parents without non obsolete descendants"""
    return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')


@cachefor('bumped')
def _computebumpedset(repo):
    """the set of revs trying to obsolete public revisions"""
    # get all possible bumped changesets
    tonode = repo.changelog.node
    publicnodes = (tonode(r) for r in repo.revs('public()'))
    successors = allsuccessors(repo.obsstore, publicnodes,
                               ignoreflags=bumpedfix)
    # revisions that are public or already obsolete don't count as bumped
    query = '%ld - obsolete() - public()'
    return set(repo.revs(query, _knownrevs(repo, successors)))

@cachefor('divergent')
def _computedivergentset(repo):
    """the set of revs that compete to be the final successors of some
    revision."""
    divergent = set()
    obsstore = repo.obsstore
    newermap = {}
    for ctx in repo.set('(not public()) - obsolete()'):
        mark = obsstore.precursors.get(ctx.node(), ())
        toprocess = set(mark)
        while toprocess:
            prec = toprocess.pop()[0]
            if prec not in newermap:
                successorssets(repo, prec, newermap)
            newer = [n for n in newermap[prec] if n]
            if len(newer) > 1:
                divergent.add(ctx.rev())
                break
            toprocess.update(obsstore.precursors.get(prec, ()))
    return divergent


def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
    `old` and each `new` are changectx objects.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used unless specified otherwise in the
    `metadata` argument.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for prec, sucs in relations:
            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
            repo.filteredrevcache.clear()
        tr.close()
    finally:
        tr.release()