obsolete: extract foreground computation from bookmark.validdest...
Pierre-Yves David
r18984:efef056b default
@@ -1,310 +1,298
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.node import hex
10 10 from mercurial import encoding, error, util, obsolete
11 11 import errno, os
12 12
13 13 class bmstore(dict):
14 14 """Storage for bookmarks.
15 15
16 16 This object should do all bookmark reads and writes, so that it's
17 17 fairly simple to replace the storage underlying bookmarks without
18 18 having to clone the logic surrounding bookmarks.
19 19
20 20 This particular bmstore implementation stores bookmarks as
21 21 {hash}\s{name}\n (the same format as localtags) in
22 22 .hg/bookmarks. The mapping is stored as {name: nodeid}.
23 23
24 24 This class does NOT handle the "current" bookmark state at this
25 25 time.
26 26 """
27 27
28 28 def __init__(self, repo):
29 29 dict.__init__(self)
30 30 self._repo = repo
31 31 try:
32 32 for line in repo.vfs('bookmarks'):
33 33 line = line.strip()
34 34 if not line:
35 35 continue
36 36 if ' ' not in line:
37 37 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
38 38 % line)
39 39 continue
40 40 sha, refspec = line.split(' ', 1)
41 41 refspec = encoding.tolocal(refspec)
42 42 try:
43 43 self[refspec] = repo.changelog.lookup(sha)
44 44 except LookupError:
45 45 pass
46 46 except IOError, inst:
47 47 if inst.errno != errno.ENOENT:
48 48 raise
49 49
50 50 def write(self):
51 51 '''Write bookmarks
52 52
53 53 Write the given bookmark => hash dictionary to the .hg/bookmarks file
54 54 in a format equal to that of localtags.
55 55
56 56 We also store a backup of the previous state in undo.bookmarks that
57 57 can be copied back on rollback.
58 58 '''
59 59 repo = self._repo
60 60 if repo._bookmarkcurrent not in self:
61 61 setcurrent(repo, None)
62 62
63 63 wlock = repo.wlock()
64 64 try:
65 65
66 66 file = repo.vfs('bookmarks', 'w', atomictemp=True)
67 67 for name, node in self.iteritems():
68 68 file.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
69 69 file.close()
70 70
71 71 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
72 72 try:
73 73 os.utime(repo.sjoin('00changelog.i'), None)
74 74 except OSError:
75 75 pass
76 76
77 77 finally:
78 78 wlock.release()
79 79
80 80 def readcurrent(repo):
81 81 '''Get the current bookmark
82 82
83 83 If we use gittish branches we have a current bookmark that
84 84 we are on. This function returns the name of the bookmark. It
85 85 is stored in .hg/bookmarks.current
86 86 '''
87 87 mark = None
88 88 try:
89 89 file = repo.opener('bookmarks.current')
90 90 except IOError, inst:
91 91 if inst.errno != errno.ENOENT:
92 92 raise
93 93 return None
94 94 try:
95 95 # No readline() in osutil.posixfile, reading everything is cheap
96 96 mark = encoding.tolocal((file.readlines() or [''])[0])
97 97 if mark == '' or mark not in repo._bookmarks:
98 98 mark = None
99 99 finally:
100 100 file.close()
101 101 return mark
102 102
103 103 def setcurrent(repo, mark):
104 104 '''Set the name of the bookmark that we are currently on
105 105
106 106 Set the name of the bookmark that we are on (hg update <bookmark>).
107 107 The name is recorded in .hg/bookmarks.current
108 108 '''
109 109 current = repo._bookmarkcurrent
110 110 if current == mark:
111 111 return
112 112
113 113 if mark not in repo._bookmarks:
114 114 mark = ''
115 115
116 116 wlock = repo.wlock()
117 117 try:
118 118 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
119 119 file.write(encoding.fromlocal(mark))
120 120 file.close()
121 121 finally:
122 122 wlock.release()
123 123 repo._bookmarkcurrent = mark
124 124
125 125 def unsetcurrent(repo):
126 126 wlock = repo.wlock()
127 127 try:
128 128 try:
129 129 util.unlink(repo.join('bookmarks.current'))
130 130 repo._bookmarkcurrent = None
131 131 except OSError, inst:
132 132 if inst.errno != errno.ENOENT:
133 133 raise
134 134 finally:
135 135 wlock.release()
136 136
137 137 def iscurrent(repo, mark=None, parents=None):
138 138 '''Tell whether the current bookmark is also active
139 139
140 140 I.e., the bookmark listed in .hg/bookmarks.current also points to a
141 141 parent of the working directory.
142 142 '''
143 143 if not mark:
144 144 mark = repo._bookmarkcurrent
145 145 if not parents:
146 146 parents = [p.node() for p in repo[None].parents()]
147 147 marks = repo._bookmarks
148 148 return (mark in marks and marks[mark] in parents)
149 149
150 150 def updatecurrentbookmark(repo, oldnode, curbranch):
151 151 try:
152 152 return update(repo, oldnode, repo.branchtip(curbranch))
153 153 except error.RepoLookupError:
154 154 if curbranch == "default": # no default branch!
155 155 return update(repo, oldnode, repo.lookup("tip"))
156 156 else:
157 157 raise util.Abort(_("branch %s not found") % curbranch)
158 158
159 159 def deletedivergent(repo, deletefrom, bm):
160 160 '''Delete divergent versions of bm on nodes in deletefrom.
161 161
162 162 Return True if at least one bookmark was deleted, False otherwise.'''
163 163 deleted = False
164 164 marks = repo._bookmarks
165 165 divergent = [b for b in marks if b.split('@', 1)[0] == bm.split('@', 1)[0]]
166 166 for mark in divergent:
167 167 if mark and marks[mark] in deletefrom:
168 168 if mark != bm:
169 169 del marks[mark]
170 170 deleted = True
171 171 return deleted
172 172
173 173 def update(repo, parents, node):
174 174 marks = repo._bookmarks
175 175 update = False
176 176 cur = repo._bookmarkcurrent
177 177 if not cur:
178 178 return False
179 179
180 180 if marks[cur] in parents:
181 181 old = repo[marks[cur]]
182 182 new = repo[node]
183 183 if old.descendant(new):
184 184 marks[cur] = new.node()
185 185 update = True
186 186
187 187 if deletedivergent(repo, parents, cur):
188 188 update = True
189 189
190 190 if update:
191 191 marks.write()
192 192 return update
193 193
194 194 def listbookmarks(repo):
195 195 # We may try to list bookmarks on a repo type that does not
196 196 # support it (e.g., statichttprepository).
197 197 marks = getattr(repo, '_bookmarks', {})
198 198
199 199 d = {}
200 200 hasnode = repo.changelog.hasnode
201 201 for k, v in marks.iteritems():
202 202 # don't expose local divergent bookmarks
203 203 if hasnode(v) and ('@' not in k or k.endswith('@')):
204 204 d[k] = hex(v)
205 205 return d
206 206
207 207 def pushbookmark(repo, key, old, new):
208 208 w = repo.wlock()
209 209 try:
210 210 marks = repo._bookmarks
211 211 if hex(marks.get(key, '')) != old:
212 212 return False
213 213 if new == '':
214 214 del marks[key]
215 215 else:
216 216 if new not in repo:
217 217 return False
218 218 marks[key] = repo[new].node()
219 219 marks.write()
220 220 return True
221 221 finally:
222 222 w.release()
223 223
224 224 def updatefromremote(ui, repo, remotemarks, path):
225 225 ui.debug("checking for updated bookmarks\n")
226 226 changed = False
227 227 localmarks = repo._bookmarks
228 228 for k in sorted(remotemarks):
229 229 if k in localmarks:
230 230 nr, nl = remotemarks[k], localmarks[k]
231 231 if nr in repo:
232 232 cr = repo[nr]
233 233 cl = repo[nl]
234 234 if cl.rev() >= cr.rev():
235 235 continue
236 236 if validdest(repo, cl, cr):
237 237 localmarks[k] = cr.node()
238 238 changed = True
239 239 ui.status(_("updating bookmark %s\n") % k)
240 240 else:
241 241 if k == '@':
242 242 kd = ''
243 243 else:
244 244 kd = k
245 245 # find a unique @ suffix
246 246 for x in range(1, 100):
247 247 n = '%s@%d' % (kd, x)
248 248 if n not in localmarks:
249 249 break
250 250 # try to use an @pathalias suffix
251 251 # if an @pathalias already exists, we overwrite (update) it
252 252 for p, u in ui.configitems("paths"):
253 253 if path == u:
254 254 n = '%s@%s' % (kd, p)
255 255
256 256 localmarks[n] = cr.node()
257 257 changed = True
258 258 ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n))
259 259 elif remotemarks[k] in repo:
260 260 # add remote bookmarks for changes we already have
261 261 localmarks[k] = repo[remotemarks[k]].node()
262 262 changed = True
263 263 ui.status(_("adding remote bookmark %s\n") % k)
264 264
265 265 if changed:
266 266 localmarks.write()
267 267
268 268 def diff(ui, dst, src):
269 269 ui.status(_("searching for changed bookmarks\n"))
270 270
271 271 smarks = src.listkeys('bookmarks')
272 272 dmarks = dst.listkeys('bookmarks')
273 273
274 274 diff = sorted(set(smarks) - set(dmarks))
275 275 for k in diff:
276 276 mark = ui.debugflag and smarks[k] or smarks[k][:12]
277 277 ui.write(" %-25s %s\n" % (k, mark))
278 278
279 279 if len(diff) <= 0:
280 280 ui.status(_("no changed bookmarks found\n"))
281 281 return 1
282 282 return 0
283 283
284 284 def validdest(repo, old, new):
285 285 """Is the new bookmark destination a valid update from the old one"""
286 286 repo = repo.unfiltered()
287 287 if old == new:
288 288 # Old == new -> nothing to update.
289 289 return False
290 290 elif not old:
291 291 # old is nullrev, anything is valid.
292 292 # (new != nullrev has been excluded by the previous check)
293 293 return True
294 294 elif repo.obsstore:
295 # We only need this complicated logic if there is obsolescence
296 # XXX will probably deserve an optimised revset.
297 nm = repo.changelog.nodemap
298 validdests = set([old])
299 plen = -1
300 # compute the whole set of successors or descendants
301 while len(validdests) != plen:
302 plen = len(validdests)
303 succs = set(c.node() for c in validdests)
304 mutable = [c.node() for c in validdests if c.mutable()]
305 succs.update(obsolete.allsuccessors(repo.obsstore, mutable))
306 known = (n for n in succs if n in nm)
307 validdests = set(repo.set('%ln::', known))
308 return new in validdests
295 return new.node() in obsolete.foreground(repo, [old.node()])
309 296 else:
297 # still an independent clause as it is lazier (and therefore faster)
310 298 return old.descendant(new)
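A minimal sketch of how validdest is typically exercised when deciding whether a bookmark may move (illustration only, not part of the change; `repo`, `oldnode`, `newnode` and the bookmark name 'mybook' are assumed):

    old = repo[oldnode]
    new = repo[newnode]
    if validdest(repo, old, new):
        marks = repo._bookmarks
        marks['mybook'] = new.node()
        marks.write()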
@@ -1,755 +1,782
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 building new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflicts resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that used changeset X as a precursors are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successors are call "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A'))
34 34
35 35 - When changesets A and B are folded into a new changeset C two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker is used:
45 45
46 46 (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'', we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "N", could be zero.
69 69
70 70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in obsolete
73 73 markers common operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changeset identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 81 string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 from i18n import _
88 88
89 89 _pack = struct.pack
90 90 _unpack = struct.unpack
91 91
92 92 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
93 93
94 94 # the obsolete feature is not mature enough to be enabled by default.
95 95 # you have to rely on third party extensions to enable this.
96 96 _enabled = False
97 97
98 98 # data used for parsing and writing
99 99 _fmversion = 0
100 100 _fmfixed = '>BIB20s'
101 101 _fmnode = '20s'
102 102 _fmfsize = struct.calcsize(_fmfixed)
103 103 _fnodesize = struct.calcsize(_fmnode)
104 104
105 105 ### obsolescence marker flag
106 106
107 107 ## bumpedfix flag
108 108 #
109 109 # When a changeset A' succeeds a changeset A which became public, we call A'
110 110 # "bumped" because it's a successor of a public changeset
111 111 #
112 112 # o A' (bumped)
113 113 # |`:
114 114 # | o A
115 115 # |/
116 116 # o Z
117 117 #
118 118 # The way to solve this situation is to create a new changeset Ad as a child
119 119 # of A. This changeset has the same content as A'. So the diff from A to A'
120 120 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
121 121 #
122 122 # o Ad
123 123 # |`:
124 124 # | x A'
125 125 # |'|
126 126 # o | A
127 127 # |/
128 128 # o Z
129 129 #
130 130 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
131 131 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
132 132 # This flag means that the successors express the changes between the public and
133 133 # bumped version and fix the situation, breaking the transitivity of
134 134 # "bumped" here.
135 135 bumpedfix = 1
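In the diagrams above, the fix would be recorded roughly as follows (a sketch; it assumes the obsolete feature is enabled, `tr` is an open transaction, and `aprime`/`ad` stand for the binary node ids of A' and Ad):

    # mark Ad as the bumped-fix successor of A', breaking "bumped" transitivity
    repo.obsstore.create(tr, aprime, (ad,), flag=bumpedfix)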
136 136
137 137 def _readmarkers(data):
138 138 """Read and enumerate markers from raw data"""
139 139 off = 0
140 140 diskversion = _unpack('>B', data[off:off + 1])[0]
141 141 off += 1
142 142 if diskversion != _fmversion:
143 143 raise util.Abort(_('parsing obsolete marker: unknown version %r')
144 144 % diskversion)
145 145
146 146 # Loop on markers
147 147 l = len(data)
148 148 while off + _fmfsize <= l:
149 149 # read fixed part
150 150 cur = data[off:off + _fmfsize]
151 151 off += _fmfsize
152 152 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
153 153 # read replacement
154 154 sucs = ()
155 155 if nbsuc:
156 156 s = (_fnodesize * nbsuc)
157 157 cur = data[off:off + s]
158 158 sucs = _unpack(_fmnode * nbsuc, cur)
159 159 off += s
160 160 # read metadata
161 161 # (metadata will be decoded on demand)
162 162 metadata = data[off:off + mdsize]
163 163 if len(metadata) != mdsize:
164 164 raise util.Abort(_('parsing obsolete marker: metadata is too '
165 165 'short, %d bytes expected, got %d')
166 166 % (mdsize, len(metadata)))
167 167 off += mdsize
168 168 yield (pre, sucs, flags, metadata)
169 169
170 170 def encodemeta(meta):
171 171 """Return encoded metadata string to string mapping.
172 172
173 173 Assume no ':' in key and no '\0' in both key and value."""
174 174 for key, value in meta.iteritems():
175 175 if ':' in key or '\0' in key:
176 176 raise ValueError("':' and '\0' are forbidden in metadata key'")
177 177 if '\0' in value:
178 178 raise ValueError("'\0' is forbidden in metadata values")
179 179 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
180 180
181 181 def decodemeta(data):
182 182 """Return string to string dictionary from encoded version."""
183 183 d = {}
184 184 for l in data.split('\0'):
185 185 if l:
186 186 key, value = l.split(':')
187 187 d[key] = value
188 188 return d
189 189
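A quick round trip through the two helpers above (a sketch; keys and values are chosen to respect the documented restrictions):

    meta = {'user': 'alice', 'date': '0 0'}
    # encodemeta sorts keys, decodemeta rebuilds the mapping
    assert decodemeta(encodemeta(meta)) == meta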
190 190 class marker(object):
191 191 """Wrap obsolete marker raw data"""
192 192
193 193 def __init__(self, repo, data):
194 194 # the repo argument will be used to create changectx in later version
195 195 self._repo = repo
196 196 self._data = data
197 197 self._decodedmeta = None
198 198
199 199 def precnode(self):
200 200 """Precursor changeset node identifier"""
201 201 return self._data[0]
202 202
203 203 def succnodes(self):
204 204 """List of successor changesets node identifiers"""
205 205 return self._data[1]
206 206
207 207 def metadata(self):
208 208 """Decoded metadata dictionary"""
209 209 if self._decodedmeta is None:
210 210 self._decodedmeta = decodemeta(self._data[3])
211 211 return self._decodedmeta
212 212
213 213 def date(self):
214 214 """Creation date as (unixtime, offset)"""
215 215 parts = self.metadata()['date'].split(' ')
216 216 return (float(parts[0]), int(parts[1]))
217 217
218 218 class obsstore(object):
219 219 """Store obsolete markers
220 220
221 221 Markers can be accessed with two mappings:
222 222 - precursors[x] -> set(markers on precursors edges of x)
223 223 - successors[x] -> set(markers on successors edges of x)
224 224 """
225 225
226 226 def __init__(self, sopener):
227 227 # caches for various obsolescence related sets
228 228 self.caches = {}
229 229 self._all = []
230 230 # new markers to serialize
231 231 self.precursors = {}
232 232 self.successors = {}
233 233 self.sopener = sopener
234 234 data = sopener.tryread('obsstore')
235 235 if data:
236 236 self._load(_readmarkers(data))
237 237
238 238 def __iter__(self):
239 239 return iter(self._all)
240 240
241 241 def __nonzero__(self):
242 242 return bool(self._all)
243 243
244 244 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
245 245 """obsolete: add a new obsolete marker
246 246
247 247 * ensure it is hashable
248 248 * check mandatory metadata
249 249 * encode metadata
250 250 """
251 251 if metadata is None:
252 252 metadata = {}
253 253 if 'date' not in metadata:
254 254 metadata['date'] = "%d %d" % util.makedate()
255 255 if len(prec) != 20:
256 256 raise ValueError(prec)
257 257 for succ in succs:
258 258 if len(succ) != 20:
259 259 raise ValueError(succ)
260 260 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
261 261 self.add(transaction, [marker])
262 262
263 263 def add(self, transaction, markers):
264 264 """Add new markers to the store
265 265
266 266 Take care of filtering duplicates.
267 267 Return the number of new markers."""
268 268 if not _enabled:
269 269 raise util.Abort('obsolete feature is not enabled on this repo')
270 270 new = [m for m in markers if m not in self._all]
271 271 if new:
272 272 f = self.sopener('obsstore', 'ab')
273 273 try:
274 274 # Whether the file's current position is at the beginning or at
275 275 # the end after opening a file for appending is implementation
276 276 # defined. So we must seek to the end before calling tell(),
277 277 # or we may get a zero offset for non-zero sized files on
278 278 # some platforms (issue3543).
279 279 f.seek(0, _SEEK_END)
280 280 offset = f.tell()
281 281 transaction.add('obsstore', offset)
282 282 # offset == 0: new file - add the version header
283 283 for bytes in _encodemarkers(new, offset == 0):
284 284 f.write(bytes)
285 285 finally:
286 286 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
287 287 # call 'filecacheentry.refresh()' here
288 288 f.close()
289 289 self._load(new)
290 290 # new markers *may* have changed several sets. invalidate the cache.
291 291 self.caches.clear()
292 292 return len(new)
293 293
294 294 def mergemarkers(self, transaction, data):
295 295 markers = _readmarkers(data)
296 296 self.add(transaction, markers)
297 297
298 298 def _load(self, markers):
299 299 for mark in markers:
300 300 self._all.append(mark)
301 301 pre, sucs = mark[:2]
302 302 self.successors.setdefault(pre, set()).add(mark)
303 303 for suc in sucs:
304 304 self.precursors.setdefault(suc, set()).add(mark)
305 305 if node.nullid in self.precursors:
306 306 raise util.Abort(_('bad obsolescence marker detected: '
307 307 'invalid successors nullid'))
308 308
309 309 def _encodemarkers(markers, addheader=False):
310 310 # Kept separate from flushmarkers(), it will be reused for
311 311 # markers exchange.
312 312 if addheader:
313 313 yield _pack('>B', _fmversion)
314 314 for marker in markers:
315 315 yield _encodeonemarker(marker)
316 316
317 317
318 318 def _encodeonemarker(marker):
319 319 pre, sucs, flags, metadata = marker
320 320 nbsuc = len(sucs)
321 321 format = _fmfixed + (_fmnode * nbsuc)
322 322 data = [nbsuc, len(metadata), flags, pre]
323 323 data.extend(sucs)
324 324 return _pack(format, *data) + metadata
325 325
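These encoders invert _readmarkers; a round-trip sketch (fake 20-byte ids, assumed to run inside this module):

    m = ('p' * 20, ('s' * 20,), 0, encodemeta({'user': 'alice'}))
    data = _pack('>B', _fmversion) + _encodeonemarker(m)
    assert list(_readmarkers(data)) == [m]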
326 326 # arbitrarily picked to fit into the 8K limit from HTTP server
327 327 # you have to take into account:
328 328 # - the version header
329 329 # - the base85 encoding
330 330 _maxpayload = 5300
331 331
332 332 def listmarkers(repo):
333 333 """List markers over pushkey"""
334 334 if not repo.obsstore:
335 335 return {}
336 336 keys = {}
337 337 parts = []
338 338 currentlen = _maxpayload * 2 # ensure we create a new part
339 339 for marker in repo.obsstore:
340 340 nextdata = _encodeonemarker(marker)
341 341 if (len(nextdata) + currentlen > _maxpayload):
342 342 currentpart = []
343 343 currentlen = 0
344 344 parts.append(currentpart)
345 345 currentpart.append(nextdata)
346 346 currentlen += len(nextdata)
347 347 for idx, part in enumerate(reversed(parts)):
348 348 data = ''.join([_pack('>B', _fmversion)] + part)
349 349 keys['dump%i' % idx] = base85.b85encode(data)
350 350 return keys
351 351
352 352 def pushmarker(repo, key, old, new):
353 353 """Push markers over pushkey"""
354 354 if not key.startswith('dump'):
355 355 repo.ui.warn(_('unknown key: %r') % key)
356 356 return 0
357 357 if old:
358 358 repo.ui.warn(_('unexpected old value for %r') % key)
359 359 return 0
360 360 data = base85.b85decode(new)
361 361 lock = repo.lock()
362 362 try:
363 363 tr = repo.transaction('pushkey: obsolete markers')
364 364 try:
365 365 repo.obsstore.mergemarkers(tr, data)
366 366 tr.close()
367 367 return 1
368 368 finally:
369 369 tr.release()
370 370 finally:
371 371 lock.release()
372 372
373 373 def allmarkers(repo):
374 374 """all obsolete markers known in a repository"""
375 375 for markerdata in repo.obsstore:
376 376 yield marker(repo, markerdata)
377 377
378 378 def precursormarkers(ctx):
379 379 """obsolete marker marking this changeset as a successors"""
380 380 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
381 381 yield marker(ctx._repo, data)
382 382
383 383 def successormarkers(ctx):
384 384 """obsolete marker making this changeset obsolete"""
385 385 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
386 386 yield marker(ctx._repo, data)
387 387
388 388 def allsuccessors(obsstore, nodes, ignoreflags=0):
389 389 """Yield node for every successor of <nodes>.
390 390
391 391 Some successors may be unknown locally.
392 392
393 393 This is a linear yield unsuited to detecting split changesets."""
394 394 remaining = set(nodes)
395 395 seen = set(remaining)
396 396 while remaining:
397 397 current = remaining.pop()
398 398 yield current
399 399 for mark in obsstore.successors.get(current, ()):
400 400 # ignore markers flagged with the specified flag
401 401 if mark[2] & ignoreflags:
402 402 continue
403 403 for suc in mark[1]:
404 404 if suc not in seen:
405 405 seen.add(suc)
406 406 remaining.add(suc)
407 407
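For example, collecting every locally known successor of a single changeset while skipping bumpedfix markers could look like this (a sketch, not an API of this module; `knownsuccessors` is a hypothetical helper name):

    def knownsuccessors(repo, start):
        # walk the successor graph, then drop nodes missing locally
        nm = repo.changelog.nodemap
        succs = allsuccessors(repo.obsstore, [start], ignoreflags=bumpedfix)
        return [n for n in succs if n in nm]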
408 def foreground(repo, nodes):
409 """return all nodes in the "foreground" of other node
410
411 The foreground of a revision is anything reachable using parent -> children
412 or precursor -> sucessor relation. It is very similars to "descendant" but
413 augmented with obsolescence information.
414
415 Beware that possible obsolescence cycle may result if complexe situation.
416 """
417 repo = repo.unfiltered()
418 foreground = set(repo.set('%ln::', nodes))
419 if repo.obsstore:
420 # We only need this complicated logic if there is obsolescence
421 # XXX will probably deserve an optimised revset.
422 nm = repo.changelog.nodemap
423 plen = -1
424 # compute the whole set of successors or descendants
425 while len(foreground) != plen:
426 plen = len(foreground)
427 succs = set(c.node() for c in foreground)
428 mutable = [c.node() for c in foreground if c.mutable()]
429 succs.update(allsuccessors(repo.obsstore, mutable))
430 known = (n for n in succs if n in nm)
431 foreground = set(repo.set('%ln::', known))
432 return set(c.node() for c in foreground)
433
434
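A typical use of foreground (a sketch; `ctx` is assumed to be a changectx of the revision of interest, and changectx.obsolete() is assumed available):

    urepo = repo.unfiltered()
    fg = foreground(urepo, [ctx.node()])
    # the surviving incarnations are the non-obsolete part of the foreground
    latest = [n for n in fg if not urepo[n].obsolete()]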
408 435 def successorssets(repo, initialnode, cache=None):
409 436 """Return all set of successors of initial nodes
410 437
411 438 A successors set of changeset A is a group of revisions that succeed A. It
412 439 succeeds A as a consistent whole, each revision being only a partial
413 440 replacement. Successors sets contain non-obsolete changesets only.
414 441
415 442 In most cases a changeset A has zero (changeset pruned) or a single
416 443 successors set that contains a single successor (changeset A replaced by
417 444 A')
418 445
419 446 When a changeset is split, it results in a successors set containing more than
420 447 a single element. Divergent rewriting will result in multiple successors
421 448 sets.
422 449
423 450 They are returned as a list of tuples containing all valid successors sets.
424 451
425 452 Final successors unknown locally are considered pruned (obsoleted
426 453 without successors).
427 454
428 455 The optional `cache` parameter is a dictionary that may contain
429 456 precomputed successors sets. It is meant to reuse the computation of a
430 457 previous call to `successorssets` when multiple calls are made at the same
431 458 time. The cache dictionary is updated in place. The caller is responsible
432 459 for its life span. Code that makes multiple calls to `successorssets`
433 460 *must* use this cache mechanism or suffer terrible performance."""
434 461
435 462 succmarkers = repo.obsstore.successors
436 463
437 464 # Stack of nodes we search successors sets for
438 465 toproceed = [initialnode]
439 466 # set version of above list for fast loop detection
440 467 # elements added to "toproceed" must be added here too
441 468 stackedset = set(toproceed)
442 469 if cache is None:
443 470 cache = {}
444 471
445 472 # This while loop is the flattened version of a recursive search for
446 473 # successors sets
447 474 #
448 475 # def successorssets(x):
449 476 # successors = directsuccessors(x)
450 477 # ss = [[]]
451 478 # for succ in directsuccessors(x):
452 479 # # product as in itertools cartesian product
453 480 # ss = product(ss, successorssets(succ))
454 481 # return ss
455 482 #
456 483 # But we cannot use plain recursive calls here:
457 484 # - that would blow the python call stack
458 485 # - obsolescence markers may have cycles, we need to handle them.
459 486 #
460 487 # The `toproceed` list acts as our call stack. Every node we search
461 488 # successors set for are stacked there.
462 489 #
463 490 # The `stackedset` is a set version of this stack used to check if a node is
464 491 # already stacked. This check is used to detect cycles and prevent infinite
465 492 # loops.
466 493 #
467 494 # successors sets of all nodes are stored in the `cache` dictionary.
468 495 #
469 496 # After this while loop ends we use the cache to return the successors sets
470 497 # for the node requested by the caller.
471 498 while toproceed:
472 499 # Every iteration tries to compute the successors sets of the topmost
473 500 # node of the stack: CURRENT.
474 501 #
475 502 # There are four possible outcomes:
476 503 #
477 504 # 1) We already know the successors sets of CURRENT:
478 505 # -> mission accomplished, pop it from the stack.
479 506 # 2) Node is not obsolete:
480 507 # -> the node is its own successors sets. Add it to the cache.
481 508 # 3) We do not know successors set of direct successors of CURRENT:
482 509 # -> We add those successors to the stack.
483 510 # 4) We know successors sets of all direct successors of CURRENT:
484 511 # -> We can compute CURRENT successors set and add it to the
485 512 # cache.
486 513 #
487 514 current = toproceed[-1]
488 515 if current in cache:
489 516 # case (1): We already know the successors sets
490 517 stackedset.remove(toproceed.pop())
491 518 elif current not in succmarkers:
492 519 # case (2): The node is not obsolete.
493 520 if current in repo:
494 521 # We have a valid last successor.
495 522 cache[current] = [(current,)]
496 523 else:
497 524 # Final obsolete version is unknown locally.
498 525 # Do not count that as a valid successor
499 526 cache[current] = []
500 527 else:
501 528 # cases (3) and (4)
502 529 #
503 530 # We proceed in two phases. Phase 1 aims to distinguish case (3)
504 531 # from case (4):
505 532 #
506 533 # For each direct successors of CURRENT, we check whether its
507 534 # successors sets are known. If they are not, we stack the
508 535 # unknown node and proceed to the next iteration of the while
509 536 # loop. (case 3)
510 537 #
511 538 # During this step, we may detect obsolescence cycles: a node
512 539 # with unknown successors sets but already in the call stack.
513 540 # In such a situation, we arbitrarily set the successors sets of
514 541 # the node to nothing (node pruned) to break the cycle.
515 542 #
516 543 # If no break was encountered we proceed to phase 2.
517 544 #
518 545 # Phase 2 computes successors sets of CURRENT (case 4); see details
519 546 # in phase 2 itself.
520 547 #
521 548 # Note the two levels of iteration in each phase.
522 549 # - The first one handles obsolescence markers using CURRENT as
523 550 # precursor (successors markers of CURRENT).
524 551 #
525 552 # Having multiple entries here means divergence.
526 553 #
527 554 # - The second one handles successors defined in each marker.
528 555 #
529 556 # Having none means a pruned node; multiple successors mean a split;
530 557 # a single successor is a standard replacement.
531 558 #
532 559 for mark in sorted(succmarkers[current]):
533 560 for suc in mark[1]:
534 561 if suc not in cache:
535 562 if suc in stackedset:
536 563 # cycle breaking
537 564 cache[suc] = []
538 565 else:
539 566 # case (3) If we have not computed successors sets
540 567 # of one of those successors we add it to the
541 568 # `toproceed` stack and stop all work for this
542 569 # iteration.
543 570 toproceed.append(suc)
544 571 stackedset.add(suc)
545 572 break
546 573 else:
547 574 continue
548 575 break
549 576 else:
550 577 # case (4): we know all successors sets of all direct
551 578 # successors
552 579 #
553 580 # Successors set contributed by each marker depends on the
554 581 # successors sets of all its "successors" nodes.
555 582 #
556 583 # Each different marker is a divergence in the obsolescence
557 584 # history. It contributes successors sets distinct from other
558 585 # markers.
559 586 #
560 587 # Within a marker, a successor may have divergent successors
561 588 # sets. In such a case, the marker will contribute multiple
562 589 # divergent successors sets. If multiple successors have
563 590 # divergent successors sets, a cartesian product is used.
564 591 #
565 592 # At the end we post-process successors sets to remove
566 593 # duplicated entries and successors sets that are strict subsets of
567 594 # another one.
568 595 succssets = []
569 596 for mark in sorted(succmarkers[current]):
570 597 # successors sets contributed by this marker
571 598 markss = [[]]
572 599 for suc in mark[1]:
573 600 # cartesian product with previous successors
574 601 productresult = []
575 602 for prefix in markss:
576 603 for suffix in cache[suc]:
577 604 newss = list(prefix)
578 605 for part in suffix:
579 606 # do not duplicate entries in the successors set;
580 607 # first entry wins.
581 608 if part not in newss:
582 609 newss.append(part)
583 610 productresult.append(newss)
584 611 markss = productresult
585 612 succssets.extend(markss)
586 613 # remove duplicates and subsets
587 614 seen = []
588 615 final = []
589 616 candidate = sorted(((set(s), s) for s in succssets if s),
590 617 key=lambda x: len(x[1]), reverse=True)
591 618 for setversion, listversion in candidate:
592 619 for seenset in seen:
593 620 if setversion.issubset(seenset):
594 621 break
595 622 else:
596 623 final.append(listversion)
597 624 seen.append(setversion)
598 625 final.reverse() # put small successors set first
599 626 cache[current] = final
600 627 return cache[initialnode]
601 628
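Calling pattern sketch, sharing the cache across calls as the docstring requires (`repo` and a list of binary node ids `nodes` are assumed):

    cache = {}
    for n in nodes:
        ssets = successorssets(repo, n, cache)
        if not ssets:
            repo.ui.write('%s is pruned\n' % node.short(n))
        elif len(ssets) > 1:
            repo.ui.write('%s has divergent successors\n' % node.short(n))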
602 629 def _knownrevs(repo, nodes):
603 630 """yield revision numbers of known nodes passed in parameters
604 631
605 632 Unknown revisions are silently ignored."""
606 633 torev = repo.changelog.nodemap.get
607 634 for n in nodes:
608 635 rev = torev(n)
609 636 if rev is not None:
610 637 yield rev
611 638
612 639 # mapping of 'set-name' -> <function to compute this set>
613 640 cachefuncs = {}
614 641 def cachefor(name):
615 642 """Decorator to register a function as computing the cache for a set"""
616 643 def decorator(func):
617 644 assert name not in cachefuncs
618 645 cachefuncs[name] = func
619 646 return func
620 647 return decorator
621 648
622 649 def getrevs(repo, name):
623 650 """Return the set of revision that belong to the <name> set
624 651
625 652 Such access may compute the set and cache it for future use"""
626 653 repo = repo.unfiltered()
627 654 if not repo.obsstore:
628 655 return ()
629 656 if name not in repo.obsstore.caches:
630 657 repo.obsstore.caches[name] = cachefuncs[name](repo)
631 658 return repo.obsstore.caches[name]
632 659
633 660 # To be simple we need to invalidate obsolescence caches when:
634 661 #
635 662 # - a new changeset is added
636 663 # - the public phase is changed
637 664 # - obsolescence markers are added
638 665 # - strip is used on a repo
639 666 def clearobscaches(repo):
640 667 """Remove all obsolescence related cache from a repo
641 668
642 669 This remove all cache in obsstore is the obsstore already exist on the
643 670 repo.
644 671
645 672 (We could be smarter here given the exact event that trigger the cache
646 673 clearing)"""
647 674 # only clear caches if there is obsstore data in this repo
648 675 if 'obsstore' in repo._filecache:
649 676 repo.obsstore.caches.clear()
650 677
651 678 @cachefor('obsolete')
652 679 def _computeobsoleteset(repo):
653 680 """the set of obsolete revisions"""
654 681 obs = set()
655 682 getrev = repo.changelog.nodemap.get
656 683 getphase = repo._phasecache.phase
657 684 for node in repo.obsstore.successors:
658 685 rev = getrev(node)
659 686 if rev is not None and getphase(repo, rev):
660 687 obs.add(rev)
661 688 return obs
662 689
663 690 @cachefor('unstable')
664 691 def _computeunstableset(repo):
665 692 """the set of non obsolete revisions with obsolete parents"""
666 693 # revset is not efficient enough here
667 694 # we do (obsolete()::) - obsolete() by hand
668 695 obs = getrevs(repo, 'obsolete')
669 696 if not obs:
670 697 return set()
671 698 cl = repo.changelog
672 699 return set(r for r in cl.descendants(obs) if r not in obs)
673 700
674 701 @cachefor('suspended')
675 702 def _computesuspendedset(repo):
676 703 """the set of obsolete parents with non obsolete descendants"""
677 704 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
678 705 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
679 706
680 707 @cachefor('extinct')
681 708 def _computeextinctset(repo):
682 709 """the set of obsolete parents without non obsolete descendants"""
683 710 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
684 711
685 712
686 713 @cachefor('bumped')
687 714 def _computebumpedset(repo):
688 715 """the set of revs trying to obsolete public revisions"""
689 716 # get all possible bumped changesets
690 717 tonode = repo.changelog.node
691 718 publicnodes = (tonode(r) for r in repo.revs('public()'))
692 719 successors = allsuccessors(repo.obsstore, publicnodes,
693 720 ignoreflags=bumpedfix)
694 721 # revisions public or already obsolete don't count as bumped
695 722 query = '%ld - obsolete() - public()'
696 723 return set(repo.revs(query, _knownrevs(repo, successors)))
697 724
698 725 @cachefor('divergent')
699 726 def _computedivergentset(repo):
700 727 """the set of rev that compete to be the final successors of some revision.
701 728 """
702 729 divergent = set()
703 730 obsstore = repo.obsstore
704 731 newermap = {}
705 732 for ctx in repo.set('(not public()) - obsolete()'):
706 733 mark = obsstore.precursors.get(ctx.node(), ())
707 734 toprocess = set(mark)
708 735 while toprocess:
709 736 prec = toprocess.pop()[0]
710 737 if prec not in newermap:
711 738 successorssets(repo, prec, newermap)
712 739 newer = [n for n in newermap[prec] if n]
713 740 if len(newer) > 1:
714 741 divergent.add(ctx.rev())
715 742 break
716 743 toprocess.update(obsstore.precursors.get(prec, ()))
717 744 return divergent
718 745
719 746
720 747 def createmarkers(repo, relations, flag=0, metadata=None):
721 748 """Add obsolete markers between changesets in a repo
722 749
723 750 <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
724 751 `old` and `news` are changectx objects.
725 752
726 753 Trying to obsolete a public changeset will raise an exception.
727 754
728 755 Current user and date are used except if specified otherwise in the
729 756 metadata attribute.
730 757
731 758 This function operates within a transaction of its own, but does
732 759 not take any lock on the repo.
733 760 """
734 761 # prepare metadata
735 762 if metadata is None:
736 763 metadata = {}
737 764 if 'date' not in metadata:
738 765 metadata['date'] = '%i %i' % util.makedate()
739 766 if 'user' not in metadata:
740 767 metadata['user'] = repo.ui.username()
741 768 tr = repo.transaction('add-obsolescence-marker')
742 769 try:
743 770 for prec, sucs in relations:
744 771 if not prec.mutable():
745 772 raise util.Abort("cannot obsolete immutable changeset: %s"
746 773 % prec)
747 774 nprec = prec.node()
748 775 nsucs = tuple(s.node() for s in sucs)
749 776 if nprec in nsucs:
750 777 raise util.Abort("changeset %s cannot obsolete itself" % prec)
751 778 repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
752 779 repo.filteredrevcache.clear()
753 780 tr.close()
754 781 finally:
755 782 tr.release()
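Typical call from a history rewriting command (a sketch; `old` and `new` are assumed changectx objects, `new` being the rewritten version of `old`):

    # one relation per rewritten changeset
    createmarkers(repo, [(old, (new,))])
    # pruning instead uses an empty successors tuple:
    # createmarkers(repo, [(old, ())])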