obsolete: rename `anysuccessors` into `allsuccessors`...
Pierre-Yves David
r17826:46e1a4e2 default
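The rename is mechanical: the two call sites below (one in bookmarks.py, one in discovery.py) switch from obsolete.anysuccessors to obsolete.allsuccessors, while the definition itself lives in the part of obsolete.py past the end of the excerpt shown here. For orientation, here is a minimal sketch of what the helper computes, assuming only the obsstore.successors mapping and the (precursor, successors, flags, metadata) marker layout visible in obsolete.py below; the body is illustrative, not the code from this changeset:

def allsuccessors(obsstore, node):
    """Yield node and every known successor of node, transitively (sketch).

    Walks successor markers breadth-first; some successors may be
    unknown locally. Illustrative re-implementation only.
    """
    remaining = set([node])
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current                     # the node itself is yielded too
        for mark in obsstore.successors.get(current, ()):
            # marker layout: (precursor, successors, flags, metadata)
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

Note that the call site in discovery.py below guards with suc != nh, which only makes sense if the node itself is among the yielded values, as in this sketch.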
diff --git a/mercurial/bookmarks.py b/mercurial/bookmarks.py
--- a/mercurial/bookmarks.py
+++ b/mercurial/bookmarks.py
@@ -1,275 +1,275 @@
# Mercurial bookmark support code
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from mercurial.i18n import _
from mercurial.node import hex
from mercurial import encoding, error, util, obsolete, phases
import errno, os

def read(repo):
    '''Parse .hg/bookmarks file and return a dictionary

    Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
    in the .hg/bookmarks file.
    Read the file and return a (name=>nodeid) dictionary
    '''
    bookmarks = {}
    try:
        for line in repo.opener('bookmarks'):
            line = line.strip()
            if not line:
                continue
            if ' ' not in line:
                repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
                             % line)
                continue
            sha, refspec = line.split(' ', 1)
            refspec = encoding.tolocal(refspec)
            try:
                bookmarks[refspec] = repo.changelog.lookup(sha)
            except LookupError:
                pass
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
    return bookmarks

def readcurrent(repo):
    '''Get the current bookmark

    If we use gittish branches we have a current bookmark that
    we are on. This function returns the name of the bookmark. It
    is stored in .hg/bookmarks.current
    '''
    mark = None
    try:
        file = repo.opener('bookmarks.current')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    try:
        # No readline() in osutil.posixfile, reading everything is cheap
        mark = encoding.tolocal((file.readlines() or [''])[0])
        if mark == '' or mark not in repo._bookmarks:
            mark = None
    finally:
        file.close()
    return mark

def write(repo):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to that of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks

    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)

    wlock = repo.wlock()
    try:

        file = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
        file.close()

        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
        try:
            os.utime(repo.sjoin('00changelog.i'), None)
        except OSError:
            pass

    finally:
        wlock.release()

def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    current = repo._bookmarkcurrent
    if current == mark:
        return

    if mark not in repo._bookmarks:
        mark = ''

    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
        file.write(encoding.fromlocal(mark))
        file.close()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark

def unsetcurrent(repo):
    wlock = repo.wlock()
    try:
        try:
            util.unlink(repo.join('bookmarks.current'))
            repo._bookmarkcurrent = None
        except OSError, inst:
            if inst.errno != errno.ENOENT:
                raise
    finally:
        wlock.release()

def updatecurrentbookmark(repo, oldnode, curbranch):
    try:
        return update(repo, oldnode, repo.branchtip(curbranch))
    except error.RepoLookupError:
        if curbranch == "default": # no default branch!
            return update(repo, oldnode, repo.lookup("tip"))
        else:
            raise util.Abort(_("branch %s not found") % curbranch)

def update(repo, parents, node):
    marks = repo._bookmarks
    update = False
    cur = repo._bookmarkcurrent
    if not cur:
        return False

    toupdate = [b for b in marks if b.split('@', 1)[0] == cur.split('@', 1)[0]]
    for mark in toupdate:
        if mark and marks[mark] in parents:
            old = repo[marks[mark]]
            new = repo[node]
            if old.descendant(new) and mark == cur:
                marks[cur] = new.node()
                update = True
            if mark != cur:
                del marks[mark]
    if update:
        repo._writebookmarks(marks)
    return update

def listbookmarks(repo):
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    d = {}
    for k, v in marks.iteritems():
        # don't expose local divergent bookmarks
        if '@' not in k or k.endswith('@'):
            d[k] = hex(v)
    return d

def pushbookmark(repo, key, old, new):
    w = repo.wlock()
    try:
        marks = repo._bookmarks
        if hex(marks.get(key, '')) != old:
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        write(repo)
        return True
    finally:
        w.release()

def updatefromremote(ui, repo, remote, path):
    ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    changed = False
    for k in rb.keys():
        if k in repo._bookmarks:
            nr, nl = rb[k], repo._bookmarks[k]
            if nr in repo:
                cr = repo[nr]
                cl = repo[nl]
                if cl.rev() >= cr.rev():
                    continue
                if validdest(repo, cl, cr):
                    repo._bookmarks[k] = cr.node()
                    changed = True
                    ui.status(_("updating bookmark %s\n") % k)
                else:
                    if k == '@':
                        kd = ''
                    else:
                        kd = k
                    # find a unique @ suffix
                    for x in range(1, 100):
                        n = '%s@%d' % (kd, x)
                        if n not in repo._bookmarks:
                            break
                    # try to use an @pathalias suffix
                    # if an @pathalias already exists, we overwrite (update) it
                    for p, u in ui.configitems("paths"):
                        if path == u:
                            n = '%s@%s' % (kd, p)

                    repo._bookmarks[n] = cr.node()
                    changed = True
                    ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
        elif rb[k] in repo:
            # add remote bookmarks for changes we already have
            repo._bookmarks[k] = repo[rb[k]].node()
            changed = True
            ui.status(_("adding remote bookmark %s\n") % k)

    if changed:
        write(repo)

def diff(ui, dst, src):
    ui.status(_("searching for changed bookmarks\n"))

    smarks = src.listkeys('bookmarks')
    dmarks = dst.listkeys('bookmarks')

    diff = sorted(set(smarks) - set(dmarks))
    for k in diff:
        mark = ui.debugflag and smarks[k] or smarks[k][:12]
        ui.write("   %-25s %s\n" % (k, mark))

    if len(diff) <= 0:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0

def validdest(repo, old, new):
    """Is the new bookmark destination a valid update from the old one"""
    if old == new:
        # Old == new -> nothing to update.
        return False
    elif not old:
        # old is nullrev, anything is valid.
        # (new != nullrev has been excluded by the previous check)
        return True
    elif repo.obsstore:
        # We only need this complicated logic if there is obsolescence
        # XXX will probably deserve an optimised revset.

        validdests = set([old])
        plen = -1
        # compute the whole set of successors or descendants
        while len(validdests) != plen:
            plen = len(validdests)
            succs = set(c.node() for c in validdests)
            for c in validdests:
                if c.phase() > phases.public:
                    # obsolescence markers do not apply to public changesets
-                    succs.update(obsolete.anysuccessors(repo.obsstore,
+                    succs.update(obsolete.allsuccessors(repo.obsstore,
                                                        c.node()))
            validdests = set(repo.set('%ln::', succs))
        validdests.remove(old)
        return new in validdests
    else:
        return old.descendant(new)
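Why validdest needs a fixpoint rather than a single allsuccessors call: a bookmark may legitimately jump to a descendant of a successor, whose own successors must then be considered again. A worked example under an assumed history (all changeset names are hypothetical):

# Assume this history, with bookmark moves validated against it:
#
#   A  (obsoleted, rewritten into A1)
#   A1 --- B  (B committed on top of A1, then B amended into B1)
#   B1
#
# Pass 1: validdests = {A}; allsuccessors adds A1; '%ln::' adds B.
# Pass 2: allsuccessors on B adds B1; '%ln::' keeps everything.
# Pass 3: nothing new, fixpoint reached; old (A) is then removed.
from mercurial import bookmarks
bookmarks.validdest(repo, repo['A'], repo['B1'])   # -> True
bookmarks.validdest(repo, repo['B1'], repo['A'])   # -> False, backwards move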
diff --git a/mercurial/discovery.py b/mercurial/discovery.py
--- a/mercurial/discovery.py
+++ b/mercurial/discovery.py
@@ -1,377 +1,377 @@
# discovery.py - protocol changeset discovery functions
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, short
from i18n import _
import util, setdiscovery, treediscovery, phases, obsolete, bookmarks

def findcommonincoming(repo, remote, heads=None, force=False):
    """Return a tuple (common, anyincoming, heads) used to identify the common
    subset of nodes between repo and remote.

    "common" is a list of (at least) the heads of the common subset.
    "anyincoming" is testable as a boolean indicating if any nodes are missing
    locally. If remote does not support getbundle, this actually is a list of
    roots of the nodes that would be incoming, to be supplied to
    changegroupsubset. No code except for pull should be relying on this fact
    any longer.
    "heads" is either the supplied heads, or else the remote's heads.

    If you pass heads and they are all known locally, the response lists just
    these heads in "common" and in "heads".

    Please use findcommonoutgoing to compute the set of outgoing nodes to give
    extensions a good hook into outgoing.
    """

    if not remote.capable('getbundle'):
        return treediscovery.findcommonincoming(repo, remote, heads, force)

    if heads:
        allknown = True
        nm = repo.changelog.nodemap
        for h in heads:
            if nm.get(h) is None:
                allknown = False
                break
        if allknown:
            return (heads, False, heads)

    res = setdiscovery.findcommonheads(repo.ui, repo, remote,
                                       abortwhenunrelated=not force)
    common, anyinc, srvheads = res
    return (list(common), anyinc, heads or list(srvheads))

class outgoing(object):
    '''Represents the set of nodes present in a local repo but not in a
    (possibly) remote one.

    Members:

    missing is a list of all nodes present in local but not in remote.
    common is a list of all nodes shared between the two repos.
    excluded is the list of missing changesets that shouldn't be sent remotely.
    missingheads is the list of heads of missing.
    commonheads is the list of heads of common.

    The sets are computed on demand from the heads, unless provided upfront
    by discovery.'''

    def __init__(self, revlog, commonheads, missingheads):
        self.commonheads = commonheads
        self.missingheads = missingheads
        self._revlog = revlog
        self._common = None
        self._missing = None
        self.excluded = []

    def _computecommonmissing(self):
        sets = self._revlog.findcommonmissing(self.commonheads,
                                              self.missingheads)
        self._common, self._missing = sets

    @util.propertycache
    def common(self):
        if self._common is None:
            self._computecommonmissing()
        return self._common

    @util.propertycache
    def missing(self):
        if self._missing is None:
            self._computecommonmissing()
        return self._missing

def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo.changelog, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = visibleheads(repo)
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og

def _headssummary(repo, remote, outgoing):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

    - branch: the branch name
    - remoteheads: the list of remote heads known locally
                   None if the branch is new
    - newheads: the new remote heads (known locally) with outgoing pushed
    - unsyncedheads: the list of remote heads unknown locally.
    """
    cl = repo.changelog
    headssum = {}
    # A. Create set of branches involved in the push.
    branches = set(repo[n].branch() for n in outgoing.missing)
    remotemap = remote.branchmap()
    newbranches = branches - set(remotemap)
    branches.difference_update(newbranches)

    # A. register remote heads
    remotebranches = set()
    for branch, heads in remote.branchmap().iteritems():
        remotebranches.add(branch)
        known = []
        unsynced = []
        for h in heads:
            if h in cl.nodemap:
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)
    # B. add new branch data
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # C drop data about untouched branches:
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # D. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = dict((branch, heads[1]) for branch, heads in headssum.iteritems()
                  if heads[0] is not None)
    repo._updatebranchcache(newmap, missingctx)
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    return headssum

def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute branchmapsummary for repo without branchmap support"""

    cl = repo.changelog
    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on _updatebranchcache)
    oldheads = set(h for h in remoteheads if h in cl.nodemap)
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new heads are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = list(c.node() for c in r)
    unsynced = inc and set([None]) or set()
    return {None: (oldheads, newheads, unsynced)}

def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False):
    """Check that a push won't add any outgoing head

    raise Abort error and display ui message as needed.
    """
    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise util.Abort(_("push creates new remote branches: %s!")
                         % branchnames,
                         hint=_("use 'hg push --new-branch' to create"
                                " new remote branches"))

    # 2. Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()
    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    error = None
    unsynced = False
    allmissing = set(outgoing.missing)
    allfuturecommon = set(c.node() for c in repo.set('%ld', outgoing.common))
    allfuturecommon.update(allmissing)
    for branch, heads in headssum.iteritems():
        if heads[0] is None:
            # Maybe we should abort if we push more than one head
            # for new branches ?
            continue
        candidate_newhs = set(heads[1])
        # add unsynced data
        oldhs = set(heads[0])
        oldhs.update(heads[2])
        candidate_newhs.update(heads[2])
        dhs = None
        discardedheads = set()
        if repo.obsstore:
            # remove future heads which are actually obsoleted by another
            # pushed element:
            #
            # XXX as above, there are several cases this code does not handle
            # XXX properly
            #
            # (1) if <nh> is public, it won't be affected by obsolete markers
            #     and a new head is created
            #
            # (2) if the new heads have ancestors which are not obsolete and
            #     not ancestors of any other heads we will have a new head too.
            #
            # These two cases will be easy to handle for known changesets but
            # much more tricky for unsynced changes.
            newhs = set()
            for nh in candidate_newhs:
                if nh in repo and repo[nh].phase() <= phases.public:
                    newhs.add(nh)
                else:
-                    for suc in obsolete.anysuccessors(repo.obsstore, nh):
+                    for suc in obsolete.allsuccessors(repo.obsstore, nh):
                        if suc != nh and suc in allfuturecommon:
                            discardedheads.add(nh)
                            break
                    else:
                        newhs.add(nh)
        else:
            newhs = candidate_newhs
        if [h for h in heads[2] if h not in discardedheads]:
            unsynced = True
        if len(newhs) > len(oldhs):
            # strip updates to existing remote heads from the new heads list
            dhs = list(newhs - bookmarkedheads - oldhs)
        if dhs:
            if error is None:
                if branch not in ('default', None):
                    error = _("push creates new remote head %s "
                              "on branch '%s'!") % (short(dhs[0]), branch)
                else:
                    error = _("push creates new remote head %s!"
                             ) % short(dhs[0])
                if heads[2]: # unsynced
                    hint = _("you should pull and merge or "
                             "use push -f to force")
                else:
                    hint = _("did you forget to merge? "
                             "use push -f to force")
            if branch is not None:
                repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
            for h in dhs:
                repo.ui.note(_("new remote head %s\n") % short(h))
    if error:
        raise util.Abort(error, hint=hint)

    # 6. Check for unsynced changes on involved branches.
    if unsynced:
        repo.ui.warn(_("note: unsynced remote changes!\n"))

def visibleheads(repo):
    """return the set of visible heads of this repo"""
    # XXX we want a cache on this
    sroots = repo._phasecache.phaseroots[phases.secret]
    if sroots or repo.obsstore:
        # XXX very slow revset. storing heads or secret "boundary"
        # would help.
        revset = repo.set('heads(not (%ln:: + extinct()))', sroots)

        vheads = [ctx.node() for ctx in revset]
        if not vheads:
            vheads.append(nullid)
    else:
        vheads = repo.heads()
    return vheads


def visiblebranchmap(repo):
    """return a branchmap for the visible set"""
    # XXX Recomputing this data on the fly is very slow. We should build a
    # XXX cached version while computing the standard branchmap version.
    sroots = repo._phasecache.phaseroots[phases.secret]
    if sroots or repo.obsstore:
        vbranchmap = {}
        for branch, nodes in repo.branchmap().iteritems():
            # search for secret heads.
            for n in nodes:
                if repo[n].phase() >= phases.secret:
                    nodes = None
                    break
            # if secret heads were found we must compute them again
            if nodes is None:
                s = repo.set('heads(branch(%s) - secret() - extinct())',
                             branch)
                nodes = [c.node() for c in s]
            vbranchmap[branch] = nodes
    else:
        vbranchmap = repo.branchmap()
    return vbranchmap
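In checkheads above, the renamed helper feeds the candidate-head filter: a non-public candidate head is discarded when one of its successors is already part of what the remote will hold after the push. The predicate, isolated as a hypothetical helper (the name is invented for illustration; the logic mirrors the loop above):

from mercurial import obsolete

def _supersededbypush(repo, nh, allfuturecommon):
    """True if candidate head nh is obsoleted in favour of a changeset
    that will be common after the push (sketch of the discard test in
    checkheads; helper name is invented)."""
    for suc in obsolete.allsuccessors(repo.obsstore, nh):
        # allsuccessors() also yields nh itself, hence the suc != nh guard
        if suc != nh and suc in allfuturecommon:
            return True
    return False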
@@ -1,467 +1,467 b''
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete markers handling
9 """Obsolete markers handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewriting operations, and help
17 transformations performed by history rewriting operations, and help
18 building new tools to reconciliate conflicting rewriting actions. To
18 building new tools to reconciliate conflicting rewriting actions. To
19 facilitate conflicts resolution, markers include various annotations
19 facilitate conflicts resolution, markers include various annotations
20 besides old and news changeset identifiers, such as creation date or
20 besides old and news changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23 The old obsoleted changeset is called "precursor" and possible replacements are
23 The old obsoleted changeset is called "precursor" and possible replacements are
24 called "successors". Markers that used changeset X as a precursors are called
24 called "successors". Markers that used changeset X as a precursors are called
25 "successor markers of X" because they hold information about the successors of
25 "successor markers of X" because they hold information about the successors of
26 X. Markers that use changeset Y as a successors are call "precursor markers of
26 X. Markers that use changeset Y as a successors are call "precursor markers of
27 Y" because they hold information about the precursors of Y.
27 Y" because they hold information about the precursors of Y.
28
28
29 Examples:
29 Examples:
30
30
31 - When changeset A is replacement by a changeset A', one marker is stored:
31 - When changeset A is replacement by a changeset A', one marker is stored:
32
32
33 (A, (A'))
33 (A, (A'))
34
34
35 - When changesets A and B are folded into a new changeset C two markers are
35 - When changesets A and B are folded into a new changeset C two markers are
36 stored:
36 stored:
37
37
38 (A, (C,)) and (B, (C,))
38 (A, (C,)) and (B, (C,))
39
39
40 - When changeset A is simply "pruned" from the graph, a marker in create:
40 - When changeset A is simply "pruned" from the graph, a marker in create:
41
41
42 (A, ())
42 (A, ())
43
43
44 - When changeset A is split into B and C, a single marker are used:
44 - When changeset A is split into B and C, a single marker are used:
45
45
46 (A, (C, C))
46 (A, (C, C))
47
47
48 We use a single marker to distinct the "split" case from the "divergence"
48 We use a single marker to distinct the "split" case from the "divergence"
49 case. If two independants operation rewrite the same changeset A in to A' and
49 case. If two independants operation rewrite the same changeset A in to A' and
50 A'' when have an error case: divergent rewriting. We can detect it because
50 A'' when have an error case: divergent rewriting. We can detect it because
51 two markers will be created independently:
51 two markers will be created independently:
52
52
53 (A, (B,)) and (A, (C,))
53 (A, (B,)) and (A, (C,))
54
54
55 Format
55 Format
56 ------
56 ------
57
57
58 Markers are stored in an append-only file stored in
58 Markers are stored in an append-only file stored in
59 '.hg/store/obsstore'.
59 '.hg/store/obsstore'.
60
60
61 The file starts with a version header:
61 The file starts with a version header:
62
62
63 - 1 unsigned byte: version number, starting at zero.
63 - 1 unsigned byte: version number, starting at zero.
64
64
65
65
66 The header is followed by the markers. Each marker is made of:
66 The header is followed by the markers. Each marker is made of:
67
67
68 - 1 unsigned byte: number of new changesets "R", could be zero.
68 - 1 unsigned byte: number of new changesets "R", could be zero.
69
69
70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71
71
72 - 1 byte: a bit field. It is reserved for flags used in obsolete
72 - 1 byte: a bit field. It is reserved for flags used in obsolete
73 markers common operations, to avoid repeated decoding of metadata
73 markers common operations, to avoid repeated decoding of metadata
74 entries.
74 entries.
75
75
76 - 20 bytes: obsoleted changeset identifier.
76 - 20 bytes: obsoleted changeset identifier.
77
77
78 - N*20 bytes: new changesets identifiers.
78 - N*20 bytes: new changesets identifiers.
79
79
80 - M bytes: metadata as a sequence of nul-terminated strings. Each
80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 string contains a key and a value, separated by a color ':', without
81 string contains a key and a value, separated by a color ':', without
82 additional encoding. Keys cannot contain '\0' or ':' and values
82 additional encoding. Keys cannot contain '\0' or ':' and values
83 cannot contain '\0'.
83 cannot contain '\0'.
84 """
84 """
85 import struct
85 import struct
86 import util, base85, node
86 import util, base85, node
87 from i18n import _
87 from i18n import _
88
88
89 _pack = struct.pack
89 _pack = struct.pack
90 _unpack = struct.unpack
90 _unpack = struct.unpack
91
91
92 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
92 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
93
93
94 # the obsolete feature is not mature enough to be enabled by default.
94 # the obsolete feature is not mature enough to be enabled by default.
95 # you have to rely on third party extension extension to enable this.
95 # you have to rely on third party extension extension to enable this.
96 _enabled = False
96 _enabled = False
97
97
98 # data used for parsing and writing
98 # data used for parsing and writing
99 _fmversion = 0
99 _fmversion = 0
100 _fmfixed = '>BIB20s'
100 _fmfixed = '>BIB20s'
101 _fmnode = '20s'
101 _fmnode = '20s'
102 _fmfsize = struct.calcsize(_fmfixed)
102 _fmfsize = struct.calcsize(_fmfixed)
103 _fnodesize = struct.calcsize(_fmnode)
103 _fnodesize = struct.calcsize(_fmnode)
104
104
105 def _readmarkers(data):
105 def _readmarkers(data):
106 """Read and enumerate markers from raw data"""
106 """Read and enumerate markers from raw data"""
107 off = 0
107 off = 0
108 diskversion = _unpack('>B', data[off:off + 1])[0]
108 diskversion = _unpack('>B', data[off:off + 1])[0]
109 off += 1
109 off += 1
110 if diskversion != _fmversion:
110 if diskversion != _fmversion:
111 raise util.Abort(_('parsing obsolete marker: unknown version %r')
111 raise util.Abort(_('parsing obsolete marker: unknown version %r')
112 % diskversion)
112 % diskversion)
113
113
114 # Loop on markers
114 # Loop on markers
115 l = len(data)
115 l = len(data)
116 while off + _fmfsize <= l:
116 while off + _fmfsize <= l:
117 # read fixed part
117 # read fixed part
118 cur = data[off:off + _fmfsize]
118 cur = data[off:off + _fmfsize]
119 off += _fmfsize
119 off += _fmfsize
120 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
120 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
121 # read replacement
121 # read replacement
122 sucs = ()
122 sucs = ()
123 if nbsuc:
123 if nbsuc:
124 s = (_fnodesize * nbsuc)
124 s = (_fnodesize * nbsuc)
125 cur = data[off:off + s]
125 cur = data[off:off + s]
126 sucs = _unpack(_fmnode * nbsuc, cur)
126 sucs = _unpack(_fmnode * nbsuc, cur)
127 off += s
127 off += s
128 # read metadata
128 # read metadata
129 # (metadata will be decoded on demand)
129 # (metadata will be decoded on demand)
130 metadata = data[off:off + mdsize]
130 metadata = data[off:off + mdsize]
131 if len(metadata) != mdsize:
131 if len(metadata) != mdsize:
132 raise util.Abort(_('parsing obsolete marker: metadata is too '
132 raise util.Abort(_('parsing obsolete marker: metadata is too '
133 'short, %d bytes expected, got %d')
133 'short, %d bytes expected, got %d')
134 % (mdsize, len(metadata)))
134 % (mdsize, len(metadata)))
135 off += mdsize
135 off += mdsize
136 yield (pre, sucs, flags, metadata)
136 yield (pre, sucs, flags, metadata)
137
137
138 def encodemeta(meta):
138 def encodemeta(meta):
139 """Return encoded metadata string to string mapping.
139 """Return encoded metadata string to string mapping.
140
140
141 Assume no ':' in key and no '\0' in both key and value."""
141 Assume no ':' in key and no '\0' in both key and value."""
142 for key, value in meta.iteritems():
142 for key, value in meta.iteritems():
143 if ':' in key or '\0' in key:
143 if ':' in key or '\0' in key:
144 raise ValueError("':' and '\0' are forbidden in metadata key'")
144 raise ValueError("':' and '\0' are forbidden in metadata key'")
145 if '\0' in value:
145 if '\0' in value:
146 raise ValueError("':' are forbidden in metadata value'")
146 raise ValueError("':' are forbidden in metadata value'")
147 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
147 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
148
148
149 def decodemeta(data):
149 def decodemeta(data):
150 """Return string to string dictionary from encoded version."""
150 """Return string to string dictionary from encoded version."""
151 d = {}
151 d = {}
152 for l in data.split('\0'):
152 for l in data.split('\0'):
153 if l:
153 if l:
154 key, value = l.split(':')
154 key, value = l.split(':')
155 d[key] = value
155 d[key] = value
156 return d
156 return d
157
157
158 class marker(object):
158 class marker(object):
159 """Wrap obsolete marker raw data"""
159 """Wrap obsolete marker raw data"""
160
160
161 def __init__(self, repo, data):
161 def __init__(self, repo, data):
162 # the repo argument will be used to create changectx in later version
162 # the repo argument will be used to create changectx in later version
163 self._repo = repo
163 self._repo = repo
164 self._data = data
164 self._data = data
165 self._decodedmeta = None
165 self._decodedmeta = None
166
166
167 def precnode(self):
167 def precnode(self):
168 """Precursor changeset node identifier"""
168 """Precursor changeset node identifier"""
169 return self._data[0]
169 return self._data[0]
170
170
171 def succnodes(self):
171 def succnodes(self):
172 """List of successor changesets node identifiers"""
172 """List of successor changesets node identifiers"""
173 return self._data[1]
173 return self._data[1]
174
174
175 def metadata(self):
175 def metadata(self):
176 """Decoded metadata dictionary"""
176 """Decoded metadata dictionary"""
177 if self._decodedmeta is None:
177 if self._decodedmeta is None:
178 self._decodedmeta = decodemeta(self._data[3])
178 self._decodedmeta = decodemeta(self._data[3])
179 return self._decodedmeta
179 return self._decodedmeta
180
180
181 def date(self):
181 def date(self):
182 """Creation date as (unixtime, offset)"""
182 """Creation date as (unixtime, offset)"""
183 parts = self.metadata()['date'].split(' ')
183 parts = self.metadata()['date'].split(' ')
184 return (float(parts[0]), int(parts[1]))
184 return (float(parts[0]), int(parts[1]))
185
185
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors[x] -> set(markers on precursors edges of x)
    - successors[x] -> set(markers on successors edges of x)
    """

    def __init__(self, sopener):
        # caches for various obsolescence related sets
        self.caches = {}
        self._all = []
        # indexes to look up markers by precursor or successor node
        self.precursors = {}
        self.successors = {}
        self.sopener = sopener
        data = sopener.tryread('obsstore')
        if data:
            self._load(_readmarkers(data))

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        return bool(self._all)

    def create(self, transaction, prec, succs=(), flag=0, metadata=None):
        """Add a new obsolete marker

        * ensuring it is hashable
        * checking mandatory metadata
        * encoding metadata
        """
        if metadata is None:
            metadata = {}
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(transaction, [marker])

    def add(self, transaction, markers):
        """Add new markers to the store

        Take care of filtering duplicates.
        Return the number of new markers."""
        if not _enabled:
            raise util.Abort('obsolete feature is not enabled on this repo')
        new = [m for m in markers if m not in self._all]
        if new:
            f = self.sopener('obsstore', 'ab')
            try:
                # Whether the file's current position is at the beginning
                # or at the end after opening a file for appending is
                # implementation defined. So we must seek to the end before
                # calling tell(), or we may get a zero offset for non-zero
                # sized files on some platforms (issue3543).
                f.seek(0, _SEEK_END)
                offset = f.tell()
                transaction.add('obsstore', offset)
                # offset == 0: new file - add the version header
                for bytes in _encodemarkers(new, offset == 0):
                    f.write(bytes)
            finally:
                # XXX: f.close() == filecache invalidation == obsstore
                # rebuilt. call 'filecacheentry.refresh()' here
                f.close()
            self._load(new)
            # new markers *may* have changed several sets. invalidate the
            # caches.
            self.caches.clear()
        return len(new)

    def mergemarkers(self, transaction, data):
        markers = _readmarkers(data)
        self.add(transaction, markers)

    def _load(self, markers):
        for mark in markers:
            self._all.append(mark)
            pre, sucs = mark[:2]
            self.successors.setdefault(pre, set()).add(mark)
            for suc in sucs:
                self.precursors.setdefault(suc, set()).add(mark)
        if node.nullid in self.precursors:
            raise util.Abort(_('bad obsolescence marker detected: '
                               'invalid successors nullid'))

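
# Editor's illustrative sketch (not part of the original module): recording
# that node `old` was rewritten into node `new` through the low-level store
# API. `old` and `new` are assumed to be 20-byte binary node ids, the repo
# lock is assumed to be held, and the obsolete feature must be enabled.
def _example_record_rewrite(repo, old, new):
    tr = repo.transaction('example-obsolete-marker')
    try:
        repo.obsstore.create(tr, old, (new,), metadata={'user': 'alice'})
        # add() (called by create()) filters duplicates, so recording the
        # same rewrite twice leaves the store unchanged
        tr.close()
    finally:
        tr.release()
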
def _encodemarkers(markers, addheader=False):
    # Kept separate from flushmarkers(), it will be reused for
    # markers exchange.
    if addheader:
        yield _pack('>B', _fmversion)
    for marker in markers:
        yield _encodeonemarker(marker)


def _encodeonemarker(marker):
    pre, sucs, flags, metadata = marker
    nbsuc = len(sucs)
    format = _fmfixed + (_fmnode * nbsuc)
    data = [nbsuc, len(metadata), flags, pre]
    data.extend(sucs)
    return _pack(format, *data) + metadata

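
# Editor's note (layout inferred from _encodeonemarker above; the struct
# codes themselves live in _fmfixed/_fmnode near the top of the module):
# each marker is a fixed-width head packing the successor count, the
# metadata length, the flags and the 20-byte precursor node, followed by
# one 20-byte node per successor and finally the raw encoded metadata bytes.
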
# arbitrarily picked to fit into the 8K limit from HTTP servers
# you have to take into account:
# - the version header
# - the base85 encoding
_maxpayload = 5300

def listmarkers(repo):
    """List markers over pushkey"""
    if not repo.obsstore:
        return {}
    keys = {}
    parts = []
    currentlen = _maxpayload * 2  # ensure we create a new part
    for marker in repo.obsstore:
        nextdata = _encodeonemarker(marker)
        if (len(nextdata) + currentlen > _maxpayload):
            currentpart = []
            currentlen = 0
            parts.append(currentpart)
        currentpart.append(nextdata)
        currentlen += len(nextdata)
    for idx, part in enumerate(reversed(parts)):
        data = ''.join([_pack('>B', _fmversion)] + part)
        keys['dump%i' % idx] = base85.b85encode(data)
    return keys

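
# Editor's note (behaviour of listmarkers above): markers are grouped into
# parts of roughly _maxpayload bytes, each part is prefixed with its own
# version header, and parts become pushkey entries named 'dump0', 'dump1',
# ... with 'dump0' holding the most recently appended markers.
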
def pushmarker(repo, key, old, new):
    """Push markers over pushkey"""
    if not key.startswith('dump'):
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        tr = repo.transaction('pushkey: obsolete markers')
        try:
            repo.obsstore.mergemarkers(tr, data)
            tr.close()
            return 1
        finally:
            tr.release()
    finally:
        lock.release()

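
# Editor's illustrative sketch (not part of the original module): wiring the
# two pushkey halves together directly. A real exchange goes through the
# pushkey protocol; calling pushmarker() on another local repository is an
# assumption made for illustration.
def _example_copy_markers(sourcerepo, destrepo):
    for key, value in listmarkers(sourcerepo).items():
        # parts are write-once blobs, so the expected old value is ''
        pushmarker(destrepo, key, '', value)
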
def allmarkers(repo):
    """all obsolete markers known in a repository"""
    for markerdata in repo.obsstore:
        yield marker(repo, markerdata)

def precursormarkers(ctx):
    """obsolete markers marking this changeset as a successor"""
    for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def successormarkers(ctx):
    """obsolete markers making this changeset obsolete"""
    for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
        yield marker(ctx._repo, data)

def allsuccessors(obsstore, node):
    """Yield every successor of <node>

    This is a linear yield unsuited to detecting split changesets."""
    remaining = set([node])
    seen = set(remaining)
    while remaining:
        current = remaining.pop()
        yield current
        for mark in obsstore.successors.get(current, ()):
            for suc in mark[1]:
                if suc not in seen:
                    seen.add(suc)
                    remaining.add(suc)

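
# Editor's illustrative sketch (not part of the original module): the walk
# above yields the queried node itself first, then everything transitively
# reachable through successor markers, including nodes the local repository
# may never have seen.
def _example_known_successors(repo, node):
    nm = repo.changelog.nodemap
    # keep only the successors present in the local changelog
    return [n for n in allsuccessors(repo.obsstore, node) if n in nm]
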
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
    """Decorator to register a function as computing the cache for a set"""
    def decorator(func):
        assert name not in cachefuncs
        cachefuncs[name] = func
        return func
    return decorator

def getrevs(repo, name):
    """Return the set of revisions that belong to the <name> set

    Such access may compute the set and cache it for future use"""
    if not repo.obsstore:
        return ()
    if name not in repo.obsstore.caches:
        repo.obsstore.caches[name] = cachefuncs[name](repo)
    return repo.obsstore.caches[name]

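
# Editor's illustrative sketch (not part of the original module): how an
# additional cached set would be registered with the decorator above; the
# set name 'example' is hypothetical.
@cachefor('example')
def _computeexampleset(repo):
    """union of the obsolete and unstable sets (illustration only)"""
    return set(getrevs(repo, 'obsolete')) | set(getrevs(repo, 'unstable'))
# getrevs(repo, 'example') now computes this set once and memoizes it in
# repo.obsstore.caches until clearobscaches() is called.
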
# To keep things simple we need to invalidate the obsolescence caches when:
#
# - a new changeset is added
# - the phase of a changeset turns public
# - obsolescence markers are added
# - strip is used on a repo
def clearobscaches(repo):
    """Remove all obsolescence related caches from a repo

    This removes all caches in the obsstore if the obsstore already exists
    on the repo.

    (We could be smarter here given the exact event that triggered the
    cache clearing)"""
    # only clear caches if there is obsstore data in this repo
    if 'obsstore' in repo._filecache:
        repo.obsstore.caches.clear()

@cachefor('obsolete')
def _computeobsoleteset(repo):
    """the set of obsolete revisions"""
    obs = set()
    nm = repo.changelog.nodemap
    for node in repo.obsstore.successors:
        rev = nm.get(node)
        if rev is not None:
            obs.add(rev)
    return set(repo.revs('%ld - public()', obs))

@cachefor('unstable')
def _computeunstableset(repo):
    """the set of non obsolete revisions with an obsolete ancestor"""
    return set(repo.revs('(obsolete()::) - obsolete()'))

@cachefor('suspended')
def _computesuspendedset(repo):
    """the set of obsolete changesets with non obsolete descendants"""
    return set(repo.revs('obsolete() and obsolete()::unstable()'))

@cachefor('extinct')
def _computeextinctset(repo):
    """the set of obsolete changesets without non obsolete descendants"""
    return set(repo.revs('obsolete() - obsolete()::unstable()'))

def createmarkers(repo, relations, flag=0, metadata=None):
    """Add obsolete markers between changesets in a repo

    <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
    `old` and every `new` are changectx objects.

    Trying to obsolete a public changeset will raise an exception.

    The current user and date are used except if specified otherwise in
    the metadata attribute.

    This function operates within a transaction of its own, but does
    not take any lock on the repo.
    """
    # prepare metadata
    if metadata is None:
        metadata = {}
    if 'date' not in metadata:
        metadata['date'] = '%i %i' % util.makedate()
    if 'user' not in metadata:
        metadata['user'] = repo.ui.username()
    tr = repo.transaction('add-obsolescence-marker')
    try:
        for prec, sucs in relations:
            if not prec.mutable():
                raise util.Abort("cannot obsolete immutable changeset: %s"
                                 % prec)
            nprec = prec.node()
            nsucs = tuple(s.node() for s in sucs)
            if nprec in nsucs:
                raise util.Abort("changeset %s cannot obsolete itself" % prec)
            repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
        tr.close()
    finally:
        tr.release()
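
# Editor's illustrative sketch (not part of the original module): obsoleting
# changeset `old` in favour of `new` (both changectx) with the helper above,
# taking the lock ourselves since createmarkers() deliberately does not.
def _example_obsolete_one(repo, old, new):
    lock = repo.lock()
    try:
        createmarkers(repo, [(old, (new,))])
    finally:
        lock.release()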