obsolete: rename `anysuccessors` into `allsuccessors`...
Pierre-Yves David
r17826:46e1a4e2 default
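
The rename is purely cosmetic: the helper walks obsolescence markers transitively and yields every successor of a node, so `allsuccessors` describes it better than `anysuccessors`. Below is a minimal standalone sketch of the traversal the renamed function performs; a plain dict of node -> markers stands in for the real obsstore.successors mapping, and marker[1] holds the successor nodes, as in obsolete.py:

    def allsuccessors(successors, node):
        # breadth-first walk over successor markers; the starting node is
        # yielded first, then every transitive successor, each exactly once
        remaining = set([node])
        seen = set(remaining)
        while remaining:
            current = remaining.pop()
            yield current
            for mark in successors.get(current, ()):
                for suc in mark[1]:
                    if suc not in seen:
                        seen.add(suc)
                        remaining.add(suc)

    # A was rewritten as B, then B as C: the walk reaches all three
    markers = {'A': [('A', ('B',), 0, '')], 'B': [('B', ('C',), 0, '')]}
    assert set(allsuccessors(markers, 'A')) == set(['A', 'B', 'C'])
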
@@ -1,275 +1,275 @@
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.node import hex
10 10 from mercurial import encoding, error, util, obsolete, phases
11 11 import errno, os
12 12
13 13 def read(repo):
14 14 '''Parse .hg/bookmarks file and return a dictionary
15 15
16 16 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
17 17 in the .hg/bookmarks file.
18 18 Read the file and return a (name=>nodeid) dictionary
19 19 '''
20 20 bookmarks = {}
21 21 try:
22 22 for line in repo.opener('bookmarks'):
23 23 line = line.strip()
24 24 if not line:
25 25 continue
26 26 if ' ' not in line:
27 27 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
28 28 continue
29 29 sha, refspec = line.split(' ', 1)
30 30 refspec = encoding.tolocal(refspec)
31 31 try:
32 32 bookmarks[refspec] = repo.changelog.lookup(sha)
33 33 except LookupError:
34 34 pass
35 35 except IOError, inst:
36 36 if inst.errno != errno.ENOENT:
37 37 raise
38 38 return bookmarks
39 39
40 40 def readcurrent(repo):
41 41 '''Get the current bookmark
42 42
43 43 If we use gittish branches, we have a current bookmark that
44 44 we are on. This function returns the name of the bookmark. It
45 45 is stored in .hg/bookmarks.current
46 46 '''
47 47 mark = None
48 48 try:
49 49 file = repo.opener('bookmarks.current')
50 50 except IOError, inst:
51 51 if inst.errno != errno.ENOENT:
52 52 raise
53 53 return None
54 54 try:
55 55 # No readline() in osutil.posixfile, reading everything is cheap
56 56 mark = encoding.tolocal((file.readlines() or [''])[0])
57 57 if mark == '' or mark not in repo._bookmarks:
58 58 mark = None
59 59 finally:
60 60 file.close()
61 61 return mark
62 62
63 63 def write(repo):
64 64 '''Write bookmarks
65 65
66 66 Write the given bookmark => hash dictionary to the .hg/bookmarks file
67 67 in a format equal to that of localtags.
68 68
69 69 We also store a backup of the previous state in undo.bookmarks that
70 70 can be copied back on rollback.
71 71 '''
72 72 refs = repo._bookmarks
73 73
74 74 if repo._bookmarkcurrent not in refs:
75 75 setcurrent(repo, None)
76 76
77 77 wlock = repo.wlock()
78 78 try:
79 79
80 80 file = repo.opener('bookmarks', 'w', atomictemp=True)
81 81 for refspec, node in refs.iteritems():
82 82 file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
83 83 file.close()
84 84
85 85 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
86 86 try:
87 87 os.utime(repo.sjoin('00changelog.i'), None)
88 88 except OSError:
89 89 pass
90 90
91 91 finally:
92 92 wlock.release()
93 93
94 94 def setcurrent(repo, mark):
95 95 '''Set the name of the bookmark that we are currently on
96 96
97 97 Set the name of the bookmark that we are on (hg update <bookmark>).
98 98 The name is recorded in .hg/bookmarks.current
99 99 '''
100 100 current = repo._bookmarkcurrent
101 101 if current == mark:
102 102 return
103 103
104 104 if mark not in repo._bookmarks:
105 105 mark = ''
106 106
107 107 wlock = repo.wlock()
108 108 try:
109 109 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
110 110 file.write(encoding.fromlocal(mark))
111 111 file.close()
112 112 finally:
113 113 wlock.release()
114 114 repo._bookmarkcurrent = mark
115 115
116 116 def unsetcurrent(repo):
117 117 wlock = repo.wlock()
118 118 try:
119 119 try:
120 120 util.unlink(repo.join('bookmarks.current'))
121 121 repo._bookmarkcurrent = None
122 122 except OSError, inst:
123 123 if inst.errno != errno.ENOENT:
124 124 raise
125 125 finally:
126 126 wlock.release()
127 127
128 128 def updatecurrentbookmark(repo, oldnode, curbranch):
129 129 try:
130 130 return update(repo, oldnode, repo.branchtip(curbranch))
131 131 except error.RepoLookupError:
132 132 if curbranch == "default": # no default branch!
133 133 return update(repo, oldnode, repo.lookup("tip"))
134 134 else:
135 135 raise util.Abort(_("branch %s not found") % curbranch)
136 136
137 137 def update(repo, parents, node):
138 138 marks = repo._bookmarks
139 139 update = False
140 140 cur = repo._bookmarkcurrent
141 141 if not cur:
142 142 return False
143 143
144 144 toupdate = [b for b in marks if b.split('@', 1)[0] == cur.split('@', 1)[0]]
145 145 for mark in toupdate:
146 146 if mark and marks[mark] in parents:
147 147 old = repo[marks[mark]]
148 148 new = repo[node]
149 149 if old.descendant(new) and mark == cur:
150 150 marks[cur] = new.node()
151 151 update = True
152 152 if mark != cur:
153 153 del marks[mark]
154 154 if update:
155 155 repo._writebookmarks(marks)
156 156 return update
157 157
158 158 def listbookmarks(repo):
159 159 # We may try to list bookmarks on a repo type that does not
160 160 # support it (e.g., statichttprepository).
161 161 marks = getattr(repo, '_bookmarks', {})
162 162
163 163 d = {}
164 164 for k, v in marks.iteritems():
165 165 # don't expose local divergent bookmarks
166 166 if '@' not in k or k.endswith('@'):
167 167 d[k] = hex(v)
168 168 return d
169 169
170 170 def pushbookmark(repo, key, old, new):
171 171 w = repo.wlock()
172 172 try:
173 173 marks = repo._bookmarks
174 174 if hex(marks.get(key, '')) != old:
175 175 return False
176 176 if new == '':
177 177 del marks[key]
178 178 else:
179 179 if new not in repo:
180 180 return False
181 181 marks[key] = repo[new].node()
182 182 write(repo)
183 183 return True
184 184 finally:
185 185 w.release()
186 186
187 187 def updatefromremote(ui, repo, remote, path):
188 188 ui.debug("checking for updated bookmarks\n")
189 189 rb = remote.listkeys('bookmarks')
190 190 changed = False
191 191 for k in rb.keys():
192 192 if k in repo._bookmarks:
193 193 nr, nl = rb[k], repo._bookmarks[k]
194 194 if nr in repo:
195 195 cr = repo[nr]
196 196 cl = repo[nl]
197 197 if cl.rev() >= cr.rev():
198 198 continue
199 199 if validdest(repo, cl, cr):
200 200 repo._bookmarks[k] = cr.node()
201 201 changed = True
202 202 ui.status(_("updating bookmark %s\n") % k)
203 203 else:
204 204 if k == '@':
205 205 kd = ''
206 206 else:
207 207 kd = k
208 208 # find a unique @ suffix
209 209 for x in range(1, 100):
210 210 n = '%s@%d' % (kd, x)
211 211 if n not in repo._bookmarks:
212 212 break
213 213 # try to use an @pathalias suffix
214 214 # if an @pathalias already exists, we overwrite (update) it
215 215 for p, u in ui.configitems("paths"):
216 216 if path == u:
217 217 n = '%s@%s' % (kd, p)
218 218
219 219 repo._bookmarks[n] = cr.node()
220 220 changed = True
221 221 ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
222 222 elif rb[k] in repo:
223 223 # add remote bookmarks for changes we already have
224 224 repo._bookmarks[k] = repo[rb[k]].node()
225 225 changed = True
226 226 ui.status(_("adding remote bookmark %s\n") % k)
227 227
228 228 if changed:
229 229 write(repo)
230 230
231 231 def diff(ui, dst, src):
232 232 ui.status(_("searching for changed bookmarks\n"))
233 233
234 234 smarks = src.listkeys('bookmarks')
235 235 dmarks = dst.listkeys('bookmarks')
236 236
237 237 diff = sorted(set(smarks) - set(dmarks))
238 238 for k in diff:
239 239 mark = ui.debugflag and smarks[k] or smarks[k][:12]
240 240 ui.write(" %-25s %s\n" % (k, mark))
241 241
242 242 if len(diff) <= 0:
243 243 ui.status(_("no changed bookmarks found\n"))
244 244 return 1
245 245 return 0
246 246
247 247 def validdest(repo, old, new):
248 248 """Is the new bookmark destination a valid update from the old one"""
249 249 if old == new:
250 250 # Old == new -> nothing to update.
251 251 return False
252 252 elif not old:
253 253 # old is nullrev, anything is valid.
254 254 # (new != nullrev has been excluded by the previous check)
255 255 return True
256 256 elif repo.obsstore:
257 257 # We only need this complicated logic if there is obsolescence
258 258 # XXX will probably deserve an optimised revset.
259 259
260 260 validdests = set([old])
261 261 plen = -1
262 262 # compute the whole set of successors or descendants
263 263 while len(validdests) != plen:
264 264 plen = len(validdests)
265 265 succs = set(c.node() for c in validdests)
266 266 for c in validdests:
267 267 if c.phase() > phases.public:
268 268 # obsolescence marker does not apply to public changeset
269 succs.update(obsolete.anysuccessors(repo.obsstore,
269 succs.update(obsolete.allsuccessors(repo.obsstore,
270 270 c.node()))
271 271 validdests = set(repo.set('%ln::', succs))
272 272 validdests.remove(old)
273 273 return new in validdests
274 274 else:
275 275 return old.descendant(new)
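
The obsstore branch of validdest above is a fixed-point computation: it repeatedly extends the candidate set with the successors of every non-public member (and, via the `%ln::` revset, with their descendants) until the set stops growing. A simplified sketch of that loop on plain nodes, with a hypothetical `direct_succs` mapping in place of the obsstore and the descendant expansion left out:

    def validdests(direct_succs, old, is_public):
        # grow the set until a fixed point: no pass adds a new node
        dests = set([old])
        plen = -1
        while len(dests) != plen:
            plen = len(dests)
            grown = set(dests)
            for n in dests:
                if not is_public(n):  # markers never apply to public csets
                    grown.update(direct_succs.get(n, ()))
            dests = grown
        dests.remove(old)
        return dests

    succs = {'A': ['B'], 'B': ['C']}
    assert validdests(succs, 'A', lambda n: False) == set(['B', 'C'])
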
@@ -1,377 +1,377 @@
1 1 # discovery.py - protocol changeset discovery functions
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, short
9 9 from i18n import _
10 10 import util, setdiscovery, treediscovery, phases, obsolete, bookmarks
11 11
12 12 def findcommonincoming(repo, remote, heads=None, force=False):
13 13 """Return a tuple (common, anyincoming, heads) used to identify the common
14 14 subset of nodes between repo and remote.
15 15
16 16 "common" is a list of (at least) the heads of the common subset.
17 17 "anyincoming" is testable as a boolean indicating if any nodes are missing
18 18 locally. If remote does not support getbundle, this actually is a list of
19 19 roots of the nodes that would be incoming, to be supplied to
20 20 changegroupsubset. No code except for pull should be relying on this fact
21 21 any longer.
22 22 "heads" is either the supplied heads, or else the remote's heads.
23 23
24 24 If you pass heads and they are all known locally, the response lists just
25 25 these heads in "common" and in "heads".
26 26
27 27 Please use findcommonoutgoing to compute the set of outgoing nodes to give
28 28 extensions a good hook into outgoing.
29 29 """
30 30
31 31 if not remote.capable('getbundle'):
32 32 return treediscovery.findcommonincoming(repo, remote, heads, force)
33 33
34 34 if heads:
35 35 allknown = True
36 36 nm = repo.changelog.nodemap
37 37 for h in heads:
38 38 if nm.get(h) is None:
39 39 allknown = False
40 40 break
41 41 if allknown:
42 42 return (heads, False, heads)
43 43
44 44 res = setdiscovery.findcommonheads(repo.ui, repo, remote,
45 45 abortwhenunrelated=not force)
46 46 common, anyinc, srvheads = res
47 47 return (list(common), anyinc, heads or list(srvheads))
48 48
49 49 class outgoing(object):
50 50 '''Represents the set of nodes present in a local repo but not in a
51 51 (possibly) remote one.
52 52
53 53 Members:
54 54
55 55 missing is a list of all nodes present in local but not in remote.
56 56 common is a list of all nodes shared between the two repos.
57 57 excluded is the list of missing changesets that shouldn't be sent remotely.
58 58 missingheads is the list of heads of missing.
59 59 commonheads is the list of heads of common.
60 60
61 61 The sets are computed on demand from the heads, unless provided upfront
62 62 by discovery.'''
63 63
64 64 def __init__(self, revlog, commonheads, missingheads):
65 65 self.commonheads = commonheads
66 66 self.missingheads = missingheads
67 67 self._revlog = revlog
68 68 self._common = None
69 69 self._missing = None
70 70 self.excluded = []
71 71
72 72 def _computecommonmissing(self):
73 73 sets = self._revlog.findcommonmissing(self.commonheads,
74 74 self.missingheads)
75 75 self._common, self._missing = sets
76 76
77 77 @util.propertycache
78 78 def common(self):
79 79 if self._common is None:
80 80 self._computecommonmissing()
81 81 return self._common
82 82
83 83 @util.propertycache
84 84 def missing(self):
85 85 if self._missing is None:
86 86 self._computecommonmissing()
87 87 return self._missing
88 88
89 89 def findcommonoutgoing(repo, other, onlyheads=None, force=False,
90 90 commoninc=None, portable=False):
91 91 '''Return an outgoing instance to identify the nodes present in repo but
92 92 not in other.
93 93
94 94 If onlyheads is given, only nodes ancestral to nodes in onlyheads
95 95 (inclusive) are included. If you already know the local repo's heads,
96 96 passing them in onlyheads is faster than letting them be recomputed here.
97 97
98 98 If commoninc is given, it must be the result of a prior call to
99 99 findcommonincoming(repo, other, force) to avoid recomputing it here.
100 100
101 101 If portable is given, compute more conservative common and missingheads,
102 102 to make bundles created from the instance more portable.'''
103 103 # declare an empty outgoing object to be filled later
104 104 og = outgoing(repo.changelog, None, None)
105 105
106 106 # get common set if not provided
107 107 if commoninc is None:
108 108 commoninc = findcommonincoming(repo, other, force=force)
109 109 og.commonheads, _any, _hds = commoninc
110 110
111 111 # compute outgoing
112 112 mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
113 113 if not mayexclude:
114 114 og.missingheads = onlyheads or repo.heads()
115 115 elif onlyheads is None:
116 116 # use visible heads as it should be cached
117 117 og.missingheads = visibleheads(repo)
118 118 og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
119 119 else:
120 120 # compute common, missing and exclude secret stuff
121 121 sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
122 122 og._common, allmissing = sets
123 123 og._missing = missing = []
124 124 og.excluded = excluded = []
125 125 for node in allmissing:
126 126 ctx = repo[node]
127 127 if ctx.phase() >= phases.secret or ctx.extinct():
128 128 excluded.append(node)
129 129 else:
130 130 missing.append(node)
131 131 if len(missing) == len(allmissing):
132 132 missingheads = onlyheads
133 133 else: # update missing heads
134 134 missingheads = phases.newheads(repo, onlyheads, excluded)
135 135 og.missingheads = missingheads
136 136 if portable:
137 137 # recompute common and missingheads as if -r<rev> had been given for
138 138 # each head of missing, and --base <rev> for each head of the proper
139 139 # ancestors of missing
140 140 og._computecommonmissing()
141 141 cl = repo.changelog
142 142 missingrevs = set(cl.rev(n) for n in og._missing)
143 143 og._common = set(cl.ancestors(missingrevs)) - missingrevs
144 144 commonheads = set(og.commonheads)
145 145 og.missingheads = [h for h in og.missingheads if h not in commonheads]
146 146
147 147 return og
148 148
149 149 def _headssummary(repo, remote, outgoing):
150 150 """compute a summary of branch and heads status before and after push
151 151
152 152 return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping
153 153
154 154 - branch: the branch name
155 155 - remoteheads: the list of remote heads known locally
156 156 None if the branch is new
157 157 - newheads: the new remote heads (known locally) with outgoing pushed
158 158 - unsyncedheads: the list of remote heads unknown locally.
159 159 """
160 160 cl = repo.changelog
161 161 headssum = {}
162 162 # A. Create set of branches involved in the push.
163 163 branches = set(repo[n].branch() for n in outgoing.missing)
164 164 remotemap = remote.branchmap()
165 165 newbranches = branches - set(remotemap)
166 166 branches.difference_update(newbranches)
167 167
168 168 # B. register remote heads
169 169 remotebranches = set()
170 170 for branch, heads in remote.branchmap().iteritems():
171 171 remotebranches.add(branch)
172 172 known = []
173 173 unsynced = []
174 174 for h in heads:
175 175 if h in cl.nodemap:
176 176 known.append(h)
177 177 else:
178 178 unsynced.append(h)
179 179 headssum[branch] = (known, list(known), unsynced)
180 180 # C. add new branch data
181 181 missingctx = list(repo[n] for n in outgoing.missing)
182 182 touchedbranches = set()
183 183 for ctx in missingctx:
184 184 branch = ctx.branch()
185 185 touchedbranches.add(branch)
186 186 if branch not in headssum:
187 187 headssum[branch] = (None, [], [])
188 188
189 189 # D. drop data about untouched branches:
190 190 for branch in remotebranches - touchedbranches:
191 191 del headssum[branch]
192 192
193 193 # E. Update newmap with outgoing changes.
194 194 # This will possibly add new heads and remove existing ones.
195 195 newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
196 196 if heads[0] is not None)
197 197 repo._updatebranchcache(newmap, missingctx)
198 198 for branch, newheads in newmap.iteritems():
199 199 headssum[branch][1][:] = newheads
200 200 return headssum
201 201
202 202 def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
203 203 """Compute branchmapsummary for repo without branchmap support"""
204 204
205 205 cl = repo.changelog
206 206 # 1-4b. old servers: Check for new topological heads.
207 207 # Construct {old,new}map with branch = None (topological branch).
208 208 # (code based on _updatebranchcache)
209 209 oldheads = set(h for h in remoteheads if h in cl.nodemap)
210 210 # all nodes in outgoing.missing are children of either:
211 211 # - an element of oldheads
212 212 # - another element of outgoing.missing
213 213 # - nullrev
214 214 # This explains why the new heads are very simple to compute.
215 215 r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
216 216 newheads = list(c.node() for c in r)
217 217 unsynced = inc and set([None]) or set()
218 218 return {None: (oldheads, newheads, unsynced)}
219 219
220 220 def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
221 221 """Check that a push won't add any outgoing head
222 222
223 223 raise Abort error and display ui message as needed.
224 224 """
225 225 # Check for each named branch if we're creating new remote heads.
226 226 # To be a remote head after push, node must be either:
227 227 # - unknown locally
228 228 # - a local outgoing head descended from update
229 229 # - a remote head that's known locally and not
230 230 # ancestral to an outgoing head
231 231 if remoteheads == [nullid]:
232 232 # remote is empty, nothing to check.
233 233 return
234 234
235 235 if remote.capable('branchmap'):
236 236 headssum = _headssummary(repo, remote, outgoing)
237 237 else:
238 238 headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
239 239 newbranches = [branch for branch, heads in headssum.iteritems()
240 240 if heads[0] is None]
241 241 # 1. Check for new branches on the remote.
242 242 if newbranches and not newbranch: # new branch requires --new-branch
243 243 branchnames = ', '.join(sorted(newbranches))
244 244 raise util.Abort(_("push creates new remote branches: %s!")
245 245 % branchnames,
246 246 hint=_("use 'hg push --new-branch' to create"
247 247 " new remote branches"))
248 248
249 249 # 2. Compute newly pushed bookmarks, so we
250 250 # don't warn about bookmarked heads.
251 251 localbookmarks = repo._bookmarks
252 252 remotebookmarks = remote.listkeys('bookmarks')
253 253 bookmarkedheads = set()
254 254 for bm in localbookmarks:
255 255 rnode = remotebookmarks.get(bm)
256 256 if rnode and rnode in repo:
257 257 lctx, rctx = repo[bm], repo[rnode]
258 258 if bookmarks.validdest(repo, rctx, lctx):
259 259 bookmarkedheads.add(lctx.node())
260 260
261 261 # 3. Check for new heads.
262 262 # If there are more heads after the push than before, a suitable
263 263 # error message, depending on unsynced status, is displayed.
264 264 error = None
265 265 unsynced = False
266 266 allmissing = set(outgoing.missing)
267 267 allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
268 268 allfuturecommon.update(allmissing)
269 269 for branch, heads in headssum.iteritems():
270 270 if heads[0] is None:
271 271 # Maybe we should abort if we push more than one head
272 272 # for new branches?
273 273 continue
274 274 candidate_newhs = set(heads[1])
275 275 # add unsynced data
276 276 oldhs = set(heads[0])
277 277 oldhs.update(heads[2])
278 278 candidate_newhs.update(heads[2])
279 279 dhs = None
280 280 discardedheads = set()
281 281 if repo.obsstore:
282 282 # remove future heads which are actually obsolete by another
283 283 # pushed element:
284 284 #
285 285 # XXX as above, there are several cases this code does not handle
286 286 # XXX properly
287 287 #
288 288 # (1) if <nh> is public, it won't be affected by obsolete markers
289 289 # and a new head is created
290 290 #
291 291 # (2) if the new heads have ancestors which are not obsolete and
292 292 # not ancestors of any other heads we will have a new head too.
293 293 #
294 294 # These two cases will be easy to handle for known changesets but much
295 295 # more tricky for unsynced changes.
296 296 newhs = set()
297 297 for nh in candidate_newhs:
298 298 if nh in repo and repo[nh].phase() <= phases.public:
299 299 newhs.add(nh)
300 300 else:
301 for suc in obsolete.anysuccessors(repo.obsstore, nh):
301 for suc in obsolete.allsuccessors(repo.obsstore, nh):
302 302 if suc != nh and suc in allfuturecommon:
303 303 discardedheads.add(nh)
304 304 break
305 305 else:
306 306 newhs.add(nh)
307 307 else:
308 308 newhs = candidate_newhs
309 309 if [h for h in heads[2] if h not in discardedheads]:
310 310 unsynced = True
311 311 if len(newhs) > len(oldhs):
312 312 # strip updates to existing remote heads from the new heads list
313 313 dhs = list(newhs - bookmarkedheads - oldhs)
314 314 if dhs:
315 315 if error is None:
316 316 if branch not in ('default', None):
317 317 error = _("push creates new remote head %s "
318 318 "on branch '%s'!") % (short(dhs[0]), branch)
319 319 else:
320 320 error = _("push creates new remote head %s!"
321 321 ) % short(dhs[0])
322 322 if heads[2]: # unsynced
323 323 hint = _("you should pull and merge or "
324 324 "use push -f to force")
325 325 else:
326 326 hint = _("did you forget to merge? "
327 327 "use push -f to force")
328 328 if branch is not None:
329 329 repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
330 330 for h in dhs:
331 331 repo.ui.note(_("new remote head %s\n") % short(h))
332 332 if error:
333 333 raise util.Abort(error, hint=hint)
334 334
335 335 # 6. Check for unsynced changes on involved branches.
336 336 if unsynced:
337 337 repo.ui.warn(_("note: unsynced remote changes!\n"))
338 338
339 339 def visibleheads(repo):
340 340 """return the set of visible head of this repo"""
341 341 # XXX we want a cache on this
342 342 sroots = repo._phasecache.phaseroots[phases.secret]
343 343 if sroots or repo.obsstore:
344 344 # XXX very slow revset. storing heads or secret "boundary"
345 345 # would help.
346 346 revset = repo.set('heads(not (%ln:: + extinct()))', sroots)
347 347
348 348 vheads = [ctx.node() for ctx in revset]
349 349 if not vheads:
350 350 vheads.append(nullid)
351 351 else:
352 352 vheads = repo.heads()
353 353 return vheads
354 354
355 355
356 356 def visiblebranchmap(repo):
357 357 """return a branchmap for the visible set"""
358 358 # XXX Recomputing this data on the fly is very slow. We should build a
359 359 # XXX cached version while computing the standard branchmap version.
360 360 sroots = repo._phasecache.phaseroots[phases.secret]
361 361 if sroots or repo.obsstore:
362 362 vbranchmap = {}
363 363 for branch, nodes in repo.branchmap().iteritems():
364 364 # search for secret heads.
365 365 for n in nodes:
366 366 if repo[n].phase() >= phases.secret:
367 367 nodes = None
368 368 break
369 369 # if secret heads were found we must compute them again
370 370 if nodes is None:
371 371 s = repo.set('heads(branch(%s) - secret() - extinct())',
372 372 branch)
373 373 nodes = [c.node() for c in s]
374 374 vbranchmap[branch] = nodes
375 375 else:
376 376 vbranchmap = repo.branchmap()
377 377 return vbranchmap
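
checkheads above uses the renamed allsuccessors to avoid flagging false new heads: a candidate head is discarded when one of its successors is already part of what will be common after the push, i.e. the push itself obsoletes it. A small sketch of that filtering loop, where `succs_of` is a hypothetical stand-in for obsolete.allsuccessors:

    def filternewheads(candidates, futurecommon, succs_of):
        newheads, discarded = set(), set()
        for nh in candidates:
            # drop nh if some *other* node among its successors will be
            # common after the push; otherwise it really is a new head
            for suc in succs_of(nh):
                if suc != nh and suc in futurecommon:
                    discarded.add(nh)
                    break
            else:
                newheads.add(nh)
        return newheads, discarded

    # 'X' was rewritten as 'Y' and 'Y' is being pushed: 'X' is not a new head
    heads, dropped = filternewheads(['X'], set(['Y']), lambda n: [n, 'Y'])
    assert dropped == set(['X']) and not heads
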
@@ -1,467 +1,467 @@
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 build new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that use changeset X as a precursor are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successor are called "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A'))
34 34
35 35 - When changesets A and B are folded into a new changeset C two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker is used:
45 45
46 46 (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'', we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "N", could be zero.
69 69
70 70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in obsolete
73 73 markers common operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changeset identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 81 string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 from i18n import _
88 88
89 89 _pack = struct.pack
90 90 _unpack = struct.unpack
91 91
92 92 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
93 93
94 94 # the obsolete feature is not mature enough to be enabled by default.
95 95 # you have to rely on a third party extension to enable this.
96 96 _enabled = False
97 97
98 98 # data used for parsing and writing
99 99 _fmversion = 0
100 100 _fmfixed = '>BIB20s'
101 101 _fmnode = '20s'
102 102 _fmfsize = struct.calcsize(_fmfixed)
103 103 _fnodesize = struct.calcsize(_fmnode)
104 104
105 105 def _readmarkers(data):
106 106 """Read and enumerate markers from raw data"""
107 107 off = 0
108 108 diskversion = _unpack('>B', data[off:off + 1])[0]
109 109 off += 1
110 110 if diskversion != _fmversion:
111 111 raise util.Abort(_('parsing obsolete marker: unknown version %r')
112 112 % diskversion)
113 113
114 114 # Loop on markers
115 115 l = len(data)
116 116 while off + _fmfsize <= l:
117 117 # read fixed part
118 118 cur = data[off:off + _fmfsize]
119 119 off += _fmfsize
120 120 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
121 121 # read replacement
122 122 sucs = ()
123 123 if nbsuc:
124 124 s = (_fnodesize * nbsuc)
125 125 cur = data[off:off + s]
126 126 sucs = _unpack(_fmnode * nbsuc, cur)
127 127 off += s
128 128 # read metadata
129 129 # (metadata will be decoded on demand)
130 130 metadata = data[off:off + mdsize]
131 131 if len(metadata) != mdsize:
132 132 raise util.Abort(_('parsing obsolete marker: metadata is too '
133 133 'short, %d bytes expected, got %d')
134 134 % (mdsize, len(metadata)))
135 135 off += mdsize
136 136 yield (pre, sucs, flags, metadata)
137 137
138 138 def encodemeta(meta):
139 139 """Return encoded metadata string to string mapping.
140 140
141 141 Assume no ':' in keys and no '\0' in either keys or values."""
142 142 for key, value in meta.iteritems():
143 143 if ':' in key or '\0' in key:
144 144 raise ValueError("':' and '\0' are forbidden in metadata keys")
145 145 if '\0' in value:
146 146 raise ValueError("'\0' is forbidden in metadata values")
147 147 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
148 148
149 149 def decodemeta(data):
150 150 """Return string to string dictionary from encoded version."""
151 151 d = {}
152 152 for l in data.split('\0'):
153 153 if l:
154 154 key, value = l.split(':')
155 155 d[key] = value
156 156 return d
157 157
158 158 class marker(object):
159 159 """Wrap obsolete marker raw data"""
160 160
161 161 def __init__(self, repo, data):
162 162 # the repo argument will be used to create changectx in later version
163 163 self._repo = repo
164 164 self._data = data
165 165 self._decodedmeta = None
166 166
167 167 def precnode(self):
168 168 """Precursor changeset node identifier"""
169 169 return self._data[0]
170 170
171 171 def succnodes(self):
172 172 """List of successor changesets node identifiers"""
173 173 return self._data[1]
174 174
175 175 def metadata(self):
176 176 """Decoded metadata dictionary"""
177 177 if self._decodedmeta is None:
178 178 self._decodedmeta = decodemeta(self._data[3])
179 179 return self._decodedmeta
180 180
181 181 def date(self):
182 182 """Creation date as (unixtime, offset)"""
183 183 parts = self.metadata()['date'].split(' ')
184 184 return (float(parts[0]), int(parts[1]))
185 185
186 186 class obsstore(object):
187 187 """Store obsolete markers
188 188
189 189 Markers can be accessed with two mappings:
190 190 - precursors[x] -> set(markers on precursors edges of x)
191 191 - successors[x] -> set(markers on successors edges of x)
192 192 """
193 193
194 194 def __init__(self, sopener):
195 195 # caches for various obsolescence related cache
196 196 self.caches = {}
197 197 self._all = []
198 198 # new markers to serialize
199 199 self.precursors = {}
200 200 self.successors = {}
201 201 self.sopener = sopener
202 202 data = sopener.tryread('obsstore')
203 203 if data:
204 204 self._load(_readmarkers(data))
205 205
206 206 def __iter__(self):
207 207 return iter(self._all)
208 208
209 209 def __nonzero__(self):
210 210 return bool(self._all)
211 211
212 212 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
213 213 """obsolete: add a new obsolete marker
214 214
215 215 * ensure it is hashable
216 216 * check mandatory metadata
217 217 * encode metadata
218 218 """
219 219 if metadata is None:
220 220 metadata = {}
221 221 if len(prec) != 20:
222 222 raise ValueError(prec)
223 223 for succ in succs:
224 224 if len(succ) != 20:
225 225 raise ValueError(succ)
226 226 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
227 227 self.add(transaction, [marker])
228 228
229 229 def add(self, transaction, markers):
230 230 """Add new markers to the store
231 231
232 232 Take care of filtering duplicates.
233 233 Return the number of new markers."""
234 234 if not _enabled:
235 235 raise util.Abort('obsolete feature is not enabled on this repo')
236 236 new = [m for m in markers if m not in self._all]
237 237 if new:
238 238 f = self.sopener('obsstore', 'ab')
239 239 try:
240 240 # Whether the file's current position is at the beginning or at
241 241 # the end after opening a file for appending is implementation
242 242 # defined. So we must seek to the end before calling tell(),
243 243 # or we may get a zero offset for non-zero sized files on
244 244 # some platforms (issue3543).
245 245 f.seek(0, _SEEK_END)
246 246 offset = f.tell()
247 247 transaction.add('obsstore', offset)
248 248 # offset == 0: new file - add the version header
249 249 for bytes in _encodemarkers(new, offset == 0):
250 250 f.write(bytes)
251 251 finally:
252 252 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
253 253 # call 'filecacheentry.refresh()' here
254 254 f.close()
255 255 self._load(new)
256 256 # new markers *may* have changed several sets. invalidate the cache.
257 257 self.caches.clear()
258 258 return len(new)
259 259
260 260 def mergemarkers(self, transaction, data):
261 261 markers = _readmarkers(data)
262 262 self.add(transaction, markers)
263 263
264 264 def _load(self, markers):
265 265 for mark in markers:
266 266 self._all.append(mark)
267 267 pre, sucs = mark[:2]
268 268 self.successors.setdefault(pre, set()).add(mark)
269 269 for suc in sucs:
270 270 self.precursors.setdefault(suc, set()).add(mark)
271 271 if node.nullid in self.precursors:
272 272 raise util.Abort(_('bad obsolescence marker detected: '
273 273 'invalid successors nullid'))
274 274
275 275 def _encodemarkers(markers, addheader=False):
276 276 # Kept separate from flushmarkers(), it will be reused for
277 277 # markers exchange.
278 278 if addheader:
279 279 yield _pack('>B', _fmversion)
280 280 for marker in markers:
281 281 yield _encodeonemarker(marker)
282 282
283 283
284 284 def _encodeonemarker(marker):
285 285 pre, sucs, flags, metadata = marker
286 286 nbsuc = len(sucs)
287 287 format = _fmfixed + (_fmnode * nbsuc)
288 288 data = [nbsuc, len(metadata), flags, pre]
289 289 data.extend(sucs)
290 290 return _pack(format, *data) + metadata
291 291
292 292 # arbitrarily picked to fit into the 8K limit from HTTP server
293 293 # you have to take into account:
294 294 # - the version header
295 295 # - the base85 encoding
296 296 _maxpayload = 5300
297 297
298 298 def listmarkers(repo):
299 299 """List markers over pushkey"""
300 300 if not repo.obsstore:
301 301 return {}
302 302 keys = {}
303 303 parts = []
304 304 currentlen = _maxpayload * 2 # ensure we create a new part
305 305 for marker in repo.obsstore:
306 306 nextdata = _encodeonemarker(marker)
307 307 if (len(nextdata) + currentlen > _maxpayload):
308 308 currentpart = []
309 309 currentlen = 0
310 310 parts.append(currentpart)
311 311 currentpart.append(nextdata)
312 312 currentlen += len(nextdata)
313 313 for idx, part in enumerate(reversed(parts)):
314 314 data = ''.join([_pack('>B', _fmversion)] + part)
315 315 keys['dump%i' % idx] = base85.b85encode(data)
316 316 return keys
317 317
318 318 def pushmarker(repo, key, old, new):
319 319 """Push markers over pushkey"""
320 320 if not key.startswith('dump'):
321 321 repo.ui.warn(_('unknown key: %r') % key)
322 322 return 0
323 323 if old:
324 324 repo.ui.warn(_('unexpected old value for %r') % key)
325 325 return 0
326 326 data = base85.b85decode(new)
327 327 lock = repo.lock()
328 328 try:
329 329 tr = repo.transaction('pushkey: obsolete markers')
330 330 try:
331 331 repo.obsstore.mergemarkers(tr, data)
332 332 tr.close()
333 333 return 1
334 334 finally:
335 335 tr.release()
336 336 finally:
337 337 lock.release()
338 338
339 339 def allmarkers(repo):
340 340 """all obsolete markers known in a repository"""
341 341 for markerdata in repo.obsstore:
342 342 yield marker(repo, markerdata)
343 343
344 344 def precursormarkers(ctx):
345 345 """obsolete marker marking this changeset as a successors"""
346 346 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
347 347 yield marker(ctx._repo, data)
348 348
349 349 def successormarkers(ctx):
350 350 """obsolete marker making this changeset obsolete"""
351 351 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
352 352 yield marker(ctx._repo, data)
353 353
354 def anysuccessors(obsstore, node):
354 def allsuccessors(obsstore, node):
355 355 """Yield every successor of <node>
356 356
357 357 This is a linear yield unsuited to detecting split changesets."""
358 358 remaining = set([node])
359 359 seen = set(remaining)
360 360 while remaining:
361 361 current = remaining.pop()
362 362 yield current
363 363 for mark in obsstore.successors.get(current, ()):
364 364 for suc in mark[1]:
365 365 if suc not in seen:
366 366 seen.add(suc)
367 367 remaining.add(suc)
368 368
369 369 # mapping of 'set-name' -> <function to compute this set>
370 370 cachefuncs = {}
371 371 def cachefor(name):
372 372 """Decorator to register a function as computing the cache for a set"""
373 373 def decorator(func):
374 374 assert name not in cachefuncs
375 375 cachefuncs[name] = func
376 376 return func
377 377 return decorator
378 378
379 379 def getrevs(repo, name):
380 380 """Return the set of revision that belong to the <name> set
381 381
382 382 Such access may compute the set and cache it for future use"""
383 383 if not repo.obsstore:
384 384 return ()
385 385 if name not in repo.obsstore.caches:
386 386 repo.obsstore.caches[name] = cachefuncs[name](repo)
387 387 return repo.obsstore.caches[name]
388 388
389 389 # To keep it simple we need to invalidate obsolescence caches when:
390 390 #
391 391 # - a new changeset is added
392 392 # - public phase is changed
393 393 # - obsolescence markers are added
394 394 # - strip is used on a repo
395 395 def clearobscaches(repo):
396 396 """Remove all obsolescence related cache from a repo
397 397
398 398 This remove all cache in obsstore is the obsstore already exist on the
399 399 repo.
400 400
401 401 (We could be smarter here given the exact event that trigger the cache
402 402 clearing)"""
403 403 # only clear cache is there is obsstore data in this repo
404 404 if 'obsstore' in repo._filecache:
405 405 repo.obsstore.caches.clear()
406 406
407 407 @cachefor('obsolete')
408 408 def _computeobsoleteset(repo):
409 409 """the set of obsolete revisions"""
410 410 obs = set()
411 411 nm = repo.changelog.nodemap
412 412 for node in repo.obsstore.successors:
413 413 rev = nm.get(node)
414 414 if rev is not None:
415 415 obs.add(rev)
416 416 return set(repo.revs('%ld - public()', obs))
417 417
418 418 @cachefor('unstable')
419 419 def _computeunstableset(repo):
420 420 """the set of non obsolete revisions with obsolete parents"""
421 421 return set(repo.revs('(obsolete()::) - obsolete()'))
422 422
423 423 @cachefor('suspended')
424 424 def _computesuspendedset(repo):
425 425 """the set of obsolete parents with non obsolete descendants"""
426 426 return set(repo.revs('obsolete() and obsolete()::unstable()'))
427 427
428 428 @cachefor('extinct')
429 429 def _computeextinctset(repo):
430 430 """the set of obsolete parents without non obsolete descendants"""
431 431 return set(repo.revs('obsolete() - obsolete()::unstable()'))
432 432
433 433 def createmarkers(repo, relations, flag=0, metadata=None):
434 434 """Add obsolete markers between changesets in a repo
435 435
436 436 <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
437 437 `old` and `news` are changectx.
438 438
439 439 Trying to obsolete a public changeset will raise an exception.
440 440
441 441 Current user and date are used except if specified otherwise in the
442 442 metadata attribute.
443 443
444 444 This function operates within a transaction of its own, but does
445 445 not take any lock on the repo.
446 446 """
447 447 # prepare metadata
448 448 if metadata is None:
449 449 metadata = {}
450 450 if 'date' not in metadata:
451 451 metadata['date'] = '%i %i' % util.makedate()
452 452 if 'user' not in metadata:
453 453 metadata['user'] = repo.ui.username()
454 454 tr = repo.transaction('add-obsolescence-marker')
455 455 try:
456 456 for prec, sucs in relations:
457 457 if not prec.mutable():
458 458 raise util.Abort("cannot obsolete immutable changeset: %s"
459 459 % prec)
460 460 nprec = prec.node()
461 461 nsucs = tuple(s.node() for s in sucs)
462 462 if nprec in nsucs:
463 463 raise util.Abort("changeset %s cannot obsolete itself" % prec)
464 464 repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
465 465 tr.close()
466 466 finally:
467 467 tr.release()
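
The marker layout documented at the top of obsolete.py is easy to exercise by hand: the fixed part is the struct format '>BIB20s' (successor count, metadata size, flags, precursor node), followed by one '20s' field per successor and the raw metadata bytes. A round-trip sketch of that layout in the Python 2 style of the surrounding code; the helper names below are made up for illustration, the module itself uses _encodeonemarker and _readmarkers:

    import struct

    _fmfixed = '>BIB20s'  # nb successors, metadata size, flags, precursor
    _fmnode = '20s'

    def packmarker(prec, sucs, flags=0, metadata=''):
        # fixed part, then one 20-byte node per successor, then metadata
        fmt = _fmfixed + _fmnode * len(sucs)
        data = [len(sucs), len(metadata), flags, prec] + list(sucs)
        return struct.pack(fmt, *data) + metadata

    def unpackmarker(data):
        fsize = struct.calcsize(_fmfixed)
        nbsuc, mdsize, flags, prec = struct.unpack(_fmfixed, data[:fsize])
        off = fsize + struct.calcsize(_fmnode) * nbsuc
        sucs = struct.unpack(_fmnode * nbsuc, data[fsize:off])
        return prec, sucs, flags, data[off:off + mdsize]

    prec, suc = '\x01' * 20, '\x02' * 20
    blob = packmarker(prec, (suc,), metadata='user:alice\x00date:0 0')
    assert unpackmarker(blob) == (prec, (suc,), 0, 'user:alice\x00date:0 0')
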