push: move obsolescence marker exchange in the exchange module...
Pierre-Yves David
r20432:1b926f0b default
@@ -1,279 +1,295 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex
10 10 import errno
11 11 import util, scmutil, changegroup
12 12 import discovery, phases, obsolete, bookmarks
13 13
14 14
15 15 class pushoperation(object):
16 16 """An object that represents a single push operation
17 17
18 18 Its purpose is to carry push-related state and very common operations.
19 19
20 20 A new one should be created at the beginning of each push and discarded
21 21 afterward.
22 22 """
23 23
24 24 def __init__(self, repo, remote, force=False, revs=None, newbranch=False):
25 25 # repo we push from
26 26 self.repo = repo
27 27 self.ui = repo.ui
28 28 # repo we push to
29 29 self.remote = remote
30 30 # force option provided
31 31 self.force = force
32 32 # revs to be pushed (None is "all")
33 33 self.revs = revs
34 34 # allow push of new branch
35 35 self.newbranch = newbranch
36 36
37 37 def push(repo, remote, force=False, revs=None, newbranch=False):
38 38 '''Push outgoing changesets (limited by revs) from a local
39 39 repository to remote. Return an integer:
40 40 - None means nothing to push
41 41 - 0 means HTTP error
42 42 - 1 means we pushed and remote head count is unchanged *or*
43 43 we have outgoing changesets but refused to push
44 44 - other values as described by addchangegroup()
45 45 '''
46 46 pushop = pushoperation(repo, remote, force, revs, newbranch)
47 47 if pushop.remote.local():
48 48 missing = (set(pushop.repo.requirements)
49 49 - pushop.remote.local().supported)
50 50 if missing:
51 51 msg = _("required features are not"
52 52 " supported in the destination:"
53 53 " %s") % (', '.join(sorted(missing)))
54 54 raise util.Abort(msg)
55 55
56 56 # there are two ways to push to remote repo:
57 57 #
58 58 # addchangegroup assumes local user can lock remote
59 59 # repo (local filesystem, old ssh servers).
60 60 #
61 61 # unbundle assumes local user cannot lock remote repo (new ssh
62 62 # servers, http servers).
63 63
64 64 if not pushop.remote.canpush():
65 65 raise util.Abort(_("destination does not support push"))
66 66 unfi = pushop.repo.unfiltered()
67 67 def localphasemove(nodes, phase=phases.public):
68 68 """move <nodes> to <phase> in the local source repo"""
69 69 if locallock is not None:
70 70 phases.advanceboundary(pushop.repo, phase, nodes)
71 71 else:
72 72 # repo is not locked, do not change any phases!
73 73 # Informs the user that phases should have been moved when
74 74 # applicable.
75 75 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
76 76 phasestr = phases.phasenames[phase]
77 77 if actualmoves:
78 78 pushop.ui.status(_('cannot lock source repo, skipping '
79 79 'local %s phase update\n') % phasestr)
80 80 # get local lock as we might write phase data
81 81 locallock = None
82 82 try:
83 83 locallock = pushop.repo.lock()
84 84 except IOError, err:
85 85 if err.errno != errno.EACCES:
86 86 raise
87 87 # source repo cannot be locked.
88 88 # We do not abort the push, but just disable the local phase
89 89 # synchronisation.
90 90 msg = 'cannot lock source repository: %s\n' % err
91 91 pushop.ui.debug(msg)
92 92 try:
93 93 pushop.repo.checkpush(pushop.force, pushop.revs)
94 94 lock = None
95 95 unbundle = pushop.remote.capable('unbundle')
96 96 if not unbundle:
97 97 lock = pushop.remote.lock()
98 98 try:
99 99 # discovery
100 100 fci = discovery.findcommonincoming
101 101 commoninc = fci(unfi, pushop.remote, force=pushop.force)
102 102 common, inc, remoteheads = commoninc
103 103 fco = discovery.findcommonoutgoing
104 104 outgoing = fco(unfi, pushop.remote, onlyheads=pushop.revs,
105 105 commoninc=commoninc, force=pushop.force)
106 106
107 107
108 108 if not outgoing.missing:
109 109 # nothing to push
110 110 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
111 111 ret = None
112 112 else:
113 113 # something to push
114 114 if not pushop.force:
115 115 # if repo.obsstore == False --> no obsolete
116 116 # then, save the iteration
117 117 if unfi.obsstore:
118 118 # these messages are here for 80-char limit reasons
119 119 mso = _("push includes obsolete changeset: %s!")
120 120 mst = "push includes %s changeset: %s!"
121 121 # plain versions for i18n tool to detect them
122 122 _("push includes unstable changeset: %s!")
123 123 _("push includes bumped changeset: %s!")
124 124 _("push includes divergent changeset: %s!")
125 125 # If we are to push and there is at least one
126 126 # obsolete or unstable changeset in missing, at
127 127 # least one of the missing heads will be obsolete or
128 128 # unstable. So checking heads only is ok
129 129 for node in outgoing.missingheads:
130 130 ctx = unfi[node]
131 131 if ctx.obsolete():
132 132 raise util.Abort(mso % ctx)
133 133 elif ctx.troubled():
134 134 raise util.Abort(_(mst)
135 135 % (ctx.troubles()[0],
136 136 ctx))
137 137 newbm = pushop.ui.configlist('bookmarks', 'pushing')
138 138 discovery.checkheads(unfi, pushop.remote, outgoing,
139 139 remoteheads, pushop.newbranch,
140 140 bool(inc), newbm)
141 141
142 142 # TODO: get bundlecaps from remote
143 143 bundlecaps = None
144 144 # create a changegroup from local
145 145 if pushop.revs is None and not (outgoing.excluded
146 146 or pushop.repo.changelog.filteredrevs):
147 147 # push everything,
148 148 # use the fast path, no race possible on push
149 149 bundler = changegroup.bundle10(pushop.repo, bundlecaps)
150 150 cg = pushop.repo._changegroupsubset(outgoing,
151 151 bundler,
152 152 'push',
153 153 fastpath=True)
154 154 else:
155 155 cg = pushop.repo.getlocalbundle('push', outgoing,
156 156 bundlecaps)
157 157
158 158 # apply changegroup to remote
159 159 if unbundle:
160 160 # local repo finds heads on server, finds out what
161 161 # revs it must push. once revs transferred, if server
162 162 # finds it has different heads (someone else won
163 163 # commit/push race), server aborts.
164 164 if pushop.force:
165 165 remoteheads = ['force']
166 166 # ssh: return remote's addchangegroup()
167 167 # http: return remote's addchangegroup() or 0 for error
168 168 ret = pushop.remote.unbundle(cg, remoteheads, 'push')
169 169 else:
170 170 # we return an integer indicating remote head count
171 171 # change
172 172 ret = pushop.remote.addchangegroup(cg, 'push',
173 173 pushop.repo.url())
174 174
175 175 if ret:
176 176 # push succeeded, synchronize target of the push
177 177 cheads = outgoing.missingheads
178 178 elif pushop.revs is None:
179 179 # All-out push failed. Synchronize all common
180 180 cheads = outgoing.commonheads
181 181 else:
182 182 # I want cheads = heads(::missingheads and ::commonheads)
183 183 # (missingheads is revs with secret changeset filtered out)
184 184 #
185 185 # This can be expressed as:
186 186 # cheads = ( (missingheads and ::commonheads)
187 187 # + (commonheads and ::missingheads))
188 188 # )
189 189 #
190 190 # while trying to push we already computed the following:
191 191 # common = (::commonheads)
192 192 # missing = ((commonheads::missingheads) - commonheads)
193 193 #
194 194 # We can pick:
195 195 # * missingheads part of common (::commonheads)
196 196 common = set(outgoing.common)
197 197 nm = pushop.repo.changelog.nodemap
198 198 cheads = [node for node in pushop.revs if nm[node] in common]
199 199 # and
200 200 # * commonheads parents on missing
201 201 revset = unfi.set('%ln and parents(roots(%ln))',
202 202 outgoing.commonheads,
203 203 outgoing.missing)
204 204 cheads.extend(c.node() for c in revset)
205 205 # even when we don't push, exchanging phase data is useful
206 206 remotephases = pushop.remote.listkeys('phases')
207 207 if (pushop.ui.configbool('ui', '_usedassubrepo', False)
208 208 and remotephases # server supports phases
209 209 and ret is None # nothing was pushed
210 210 and remotephases.get('publishing', False)):
211 211 # When:
212 212 # - this is a subrepo push
213 213 # - and remote supports phases
214 214 # - and no changeset was pushed
215 215 # - and remote is publishing
216 216 # We may be in issue 3871 case!
217 217 # We drop the possible phase synchronisation done by
218 218 # courtesy to publish changesets possibly locally draft
219 219 # on the remote.
220 220 remotephases = {'publishing': 'True'}
221 221 if not remotephases: # old server or public only repo
222 222 localphasemove(cheads)
223 223 # don't push any phase data as there is nothing to push
224 224 else:
225 225 ana = phases.analyzeremotephases(pushop.repo, cheads,
226 226 remotephases)
227 227 pheads, droots = ana
228 228 ### Apply remote phase on local
229 229 if remotephases.get('publishing', False):
230 230 localphasemove(cheads)
231 231 else: # publish = False
232 232 localphasemove(pheads)
233 233 localphasemove(cheads, phases.draft)
234 234 ### Apply local phase on remote
235 235
236 236 # Get the list of all revs draft on remote but public here.
237 237 # XXX Beware that this revset breaks if droots is not strictly
238 238 # XXX roots; we may want to ensure it is, but that is costly
239 239 outdated = unfi.set('heads((%ln::%ln) and public())',
240 240 droots, cheads)
241 241 for newremotehead in outdated:
242 242 r = pushop.remote.pushkey('phases',
243 243 newremotehead.hex(),
244 244 str(phases.draft),
245 245 str(phases.public))
246 246 if not r:
247 247 pushop.ui.warn(_('updating %s to public failed!\n')
248 248 % newremotehead)
249 249 pushop.ui.debug('try to push obsolete markers to remote\n')
250 obsolete.syncpush(pushop.repo, pushop.remote)
250 _pushobsolete(pushop.repo, pushop.remote)
251 251 finally:
252 252 if lock is not None:
253 253 lock.release()
254 254 finally:
255 255 if locallock is not None:
256 256 locallock.release()
257 257
258 258 _pushbookmark(pushop)
259 259 return ret
260 260
261 def _pushobsolete(repo, remote):
262 """utility function to push obsolete markers to a remote
263
264 Exists mostly to allow overriding for experimentation purposes"""
265 if (obsolete._enabled and repo.obsstore and
266 'obsolete' in remote.listkeys('namespaces')):
267 rslts = []
268 remotedata = repo.listkeys('obsolete')
269 for key in sorted(remotedata, reverse=True):
270 # reverse sort to ensure we end with dump0
271 data = remotedata[key]
272 rslts.append(remote.pushkey('obsolete', key, '', data))
273 if [r for r in rslts if not r]:
274 msg = _('failed to push some obsolete markers!\n')
275 repo.ui.warn(msg)
276
261 277 def _pushbookmark(pushop):
262 278 """Update bookmark position on remote"""
263 279 ui = pushop.ui
264 280 repo = pushop.repo.unfiltered()
265 281 remote = pushop.remote
266 282 ui.debug("checking for updated bookmarks\n")
267 283 revnums = map(repo.changelog.rev, pushop.revs or [])
268 284 ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)]
269 285 (addsrc, adddst, advsrc, advdst, diverge, differ, invalid
270 286 ) = bookmarks.compare(repo, repo._bookmarks, remote.listkeys('bookmarks'),
271 287 srchex=hex)
272 288
273 289 for b, scid, dcid in advsrc:
274 290 if ancestors and repo[scid].rev() not in ancestors:
275 291 continue
276 292 if remote.pushkey('bookmarks', b, dcid, scid):
277 293 ui.status(_("updating bookmark %s\n") % b)
278 294 else:
279 295 ui.warn(_('updating bookmark %s failed!\n') % b)
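
This patch extracts the marker push into `_pushobsolete` precisely so the step can be overridden for experimentation, as its new docstring says. A minimal sketch, assuming a hypothetical third-party extension, of how such an override could hook in through Mercurial's `extensions.wrapfunction` (the extension and wrapper names are illustrative, not part of the patch):

    from mercurial import exchange, extensions

    def _verbosepushobsolete(orig, repo, remote):
        # hypothetical wrapper: observe the exchange, then delegate to the
        # original _pushobsolete moved here from obsolete.syncpush
        repo.ui.note('exchanging obsolescence markers with %s\n' % remote.url())
        return orig(repo, remote)

    def uisetup(ui):
        # wrapped functions receive the original callable as first argument
        extensions.wrapfunction(exchange, '_pushobsolete', _verbosepushobsolete)
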
@@ -1,880 +1,864 @@
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 building new tools to reconciliate conflicting rewriting actions. To
19 19 facilitate conflicts resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called "precursor" and possible replacements are
24 24 called "successors". Markers that use changeset X as a precursor are called
25 25 "successor markers of X" because they hold information about the successors of
26 26 X. Markers that use changeset Y as a successor are called "precursor markers of
27 27 Y" because they hold information about the precursors of Y.
28 28
29 29 Examples:
30 30
31 31 - When changeset A is replaced by a changeset A', one marker is stored:
32 32
33 33 (A, (A'))
34 34
35 35 - When changesets A and B are folded into a new changeset C two markers are
36 36 stored:
37 37
38 38 (A, (C,)) and (B, (C,))
39 39
40 40 - When changeset A is simply "pruned" from the graph, a marker is created:
41 41
42 42 (A, ())
43 43
44 44 - When changeset A is split into B and C, a single marker is used:
45 45
46 46 (A, (B, C))
47 47
48 48 We use a single marker to distinguish the "split" case from the "divergence"
49 49 case. If two independent operations rewrite the same changeset A into A' and
50 50 A'' we have an error case: divergent rewriting. We can detect it because
51 51 two markers will be created independently:
52 52
53 53 (A, (B,)) and (A, (C,))
54 54
55 55 Format
56 56 ------
57 57
58 58 Markers are stored in an append-only file stored in
59 59 '.hg/store/obsstore'.
60 60
61 61 The file starts with a version header:
62 62
63 63 - 1 unsigned byte: version number, starting at zero.
64 64
65 65
66 66 The header is followed by the markers. Each marker is made of:
67 67
68 68 - 1 unsigned byte: number of new changesets "N", can be zero.
69 69
70 70 - 1 unsigned 32-bits integer: metadata size "M" in bytes.
71 71
72 72 - 1 byte: a bit field. It is reserved for flags used in obsolete
73 73 markers common operations, to avoid repeated decoding of metadata
74 74 entries.
75 75
76 76 - 20 bytes: obsoleted changeset identifier.
77 77
78 78 - N*20 bytes: new changesets identifiers.
79 79
80 80 - M bytes: metadata as a sequence of nul-terminated strings. Each
81 81 string contains a key and a value, separated by a colon ':', without
82 82 additional encoding. Keys cannot contain '\0' or ':' and values
83 83 cannot contain '\0'.
84 84 """
85 85 import struct
86 86 import util, base85, node
87 87 import phases
88 88 from i18n import _
89 89
90 90 _pack = struct.pack
91 91 _unpack = struct.unpack
92 92
93 93 _SEEK_END = 2 # os.SEEK_END was introduced in Python 2.5
94 94
95 95 # the obsolete feature is not mature enough to be enabled by default.
96 96 # you have to rely on a third party extension to enable this.
97 97 _enabled = False
98 98
99 99 # data used for parsing and writing
100 100 _fmversion = 0
101 101 _fmfixed = '>BIB20s'
102 102 _fmnode = '20s'
103 103 _fmfsize = struct.calcsize(_fmfixed)
104 104 _fnodesize = struct.calcsize(_fmnode)
105 105
106 106 ### obsolescence marker flag
107 107
108 108 ## bumpedfix flag
109 109 #
110 110 # When a changeset A' succeeds a changeset A which became public, we call A'
111 111 # "bumped" because it's a successor of a public changeset
112 112 #
113 113 # o A' (bumped)
114 114 # |`:
115 115 # | o A
116 116 # |/
117 117 # o Z
118 118 #
119 119 # The way to solve this situation is to create a new changeset Ad as a child
120 120 # of A. This changeset has the same content as A'. So the diff from A to A'
121 121 # is the same as the diff from A to Ad. Ad is marked as a successor of A'
122 122 #
123 123 # o Ad
124 124 # |`:
125 125 # | x A'
126 126 # |'|
127 127 # o | A
128 128 # |/
129 129 # o Z
130 130 #
131 131 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
132 132 # as bumped too, we add the `bumpedfix` flag to the marker. <A', (Ad,)>.
133 133 # This flag means that the successor expresses the changes between the public and
134 134 # bumped version and fix the situation, breaking the transitivity of
135 135 # "bumped" here.
136 136 bumpedfix = 1
137 137
138 138 def _readmarkers(data):
139 139 """Read and enumerate markers from raw data"""
140 140 off = 0
141 141 diskversion = _unpack('>B', data[off:off + 1])[0]
142 142 off += 1
143 143 if diskversion != _fmversion:
144 144 raise util.Abort(_('parsing obsolete marker: unknown version %r')
145 145 % diskversion)
146 146
147 147 # Loop on markers
148 148 l = len(data)
149 149 while off + _fmfsize <= l:
150 150 # read fixed part
151 151 cur = data[off:off + _fmfsize]
152 152 off += _fmfsize
153 153 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
154 154 # read replacement
155 155 sucs = ()
156 156 if nbsuc:
157 157 s = (_fnodesize * nbsuc)
158 158 cur = data[off:off + s]
159 159 sucs = _unpack(_fmnode * nbsuc, cur)
160 160 off += s
161 161 # read metadata
162 162 # (metadata will be decoded on demand)
163 163 metadata = data[off:off + mdsize]
164 164 if len(metadata) != mdsize:
165 165 raise util.Abort(_('parsing obsolete marker: metadata is too '
166 166 'short, %d bytes expected, got %d')
167 167 % (mdsize, len(metadata)))
168 168 off += mdsize
169 169 yield (pre, sucs, flags, metadata)
170 170
171 171 def encodemeta(meta):
172 172 """Return encoded metadata string to string mapping.
173 173
174 174 Assumes no ':' in keys and no '\0' in keys or values."""
175 175 for key, value in meta.iteritems():
176 176 if ':' in key or '\0' in key:
177 177 raise ValueError("':' and '\0' are forbidden in metadata keys")
178 178 if '\0' in value:
179 179 raise ValueError("'\0' is forbidden in metadata values")
180 180 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
181 181
182 182 def decodemeta(data):
183 183 """Return string to string dictionary from encoded version."""
184 184 d = {}
185 185 for l in data.split('\0'):
186 186 if l:
187 187 key, value = l.split(':')
188 188 d[key] = value
189 189 return d
190 190
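
An editorial aside, not part of the patch: the byte layout described in the module docstring can be checked with a small standalone round-trip, packing one marker with the `>BIB20s` fixed part plus N successor nodes and M metadata bytes, then decoding it the way `_readmarkers` does (all values below are illustrative):

    import struct

    fmfixed, fmnode = '>BIB20s', '20s'
    prec = '\x11' * 20                   # obsoleted changeset id (20 bytes)
    sucs = ('\x22' * 20,)                # one successor id
    metadata = 'date:0 0\x00user:alice'  # nul-separated 'key:value' strings

    blob = struct.pack(fmfixed + fmnode * len(sucs),
                       len(sucs), len(metadata), 0, prec, *sucs) + metadata

    # decoding: fixed part first, then N node ids, then M metadata bytes
    fsize = struct.calcsize(fmfixed)
    nbsuc, mdsize, flags, pre = struct.unpack(fmfixed, blob[:fsize])
    off = fsize + 20 * nbsuc
    nodes = struct.unpack(fmnode * nbsuc, blob[fsize:off])
    assert (pre, nodes, blob[off:off + mdsize]) == (prec, sucs, metadata)
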
191 191 class marker(object):
192 192 """Wrap obsolete marker raw data"""
193 193
194 194 def __init__(self, repo, data):
195 195 # the repo argument will be used to create changectx in later version
196 196 self._repo = repo
197 197 self._data = data
198 198 self._decodedmeta = None
199 199
200 200 def __hash__(self):
201 201 return hash(self._data)
202 202
203 203 def __eq__(self, other):
204 204 if type(other) != type(self):
205 205 return False
206 206 return self._data == other._data
207 207
208 208 def precnode(self):
209 209 """Precursor changeset node identifier"""
210 210 return self._data[0]
211 211
212 212 def succnodes(self):
213 213 """List of successor changesets node identifiers"""
214 214 return self._data[1]
215 215
216 216 def metadata(self):
217 217 """Decoded metadata dictionary"""
218 218 if self._decodedmeta is None:
219 219 self._decodedmeta = decodemeta(self._data[3])
220 220 return self._decodedmeta
221 221
222 222 def date(self):
223 223 """Creation date as (unixtime, offset)"""
224 224 parts = self.metadata()['date'].split(' ')
225 225 return (float(parts[0]), int(parts[1]))
226 226
227 227 class obsstore(object):
228 228 """Store obsolete markers
229 229
230 230 Markers can be accessed with two mappings:
231 231 - precursors[x] -> set(markers on precursors edges of x)
232 232 - successors[x] -> set(markers on successors edges of x)
233 233 """
234 234
235 235 def __init__(self, sopener):
236 236 # caches for various obsolescence related cache
237 237 self.caches = {}
238 238 self._all = []
239 239 # new markers to serialize
240 240 self.precursors = {}
241 241 self.successors = {}
242 242 self.sopener = sopener
243 243 data = sopener.tryread('obsstore')
244 244 if data:
245 245 self._load(_readmarkers(data))
246 246
247 247 def __iter__(self):
248 248 return iter(self._all)
249 249
250 250 def __nonzero__(self):
251 251 return bool(self._all)
252 252
253 253 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
254 254 """obsolete: add a new obsolete marker
255 255
256 256 * ensure it is hashable
257 257 * check mandatory metadata
258 258 * encode metadata
259 259 """
260 260 if metadata is None:
261 261 metadata = {}
262 262 if 'date' not in metadata:
263 263 metadata['date'] = "%d %d" % util.makedate()
264 264 if len(prec) != 20:
265 265 raise ValueError(prec)
266 266 for succ in succs:
267 267 if len(succ) != 20:
268 268 raise ValueError(succ)
269 269 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
270 270 self.add(transaction, [marker])
271 271
272 272 def add(self, transaction, markers):
273 273 """Add new markers to the store
274 274
275 275 Takes care of filtering duplicates.
276 276 Returns the number of new markers."""
277 277 if not _enabled:
278 278 raise util.Abort('obsolete feature is not enabled on this repo')
279 279 known = set(self._all)
280 280 new = []
281 281 for m in markers:
282 282 if m not in known:
283 283 known.add(m)
284 284 new.append(m)
285 285 if new:
286 286 f = self.sopener('obsstore', 'ab')
287 287 try:
288 288 # Whether the file's current position is at the beginning or at
289 289 # the end after opening a file for appending is implementation
290 290 # defined. So we must seek to the end before calling tell(),
291 291 # or we may get a zero offset for non-zero sized files on
292 292 # some platforms (issue3543).
293 293 f.seek(0, _SEEK_END)
294 294 offset = f.tell()
295 295 transaction.add('obsstore', offset)
296 296 # offset == 0: new file - add the version header
297 297 for bytes in _encodemarkers(new, offset == 0):
298 298 f.write(bytes)
299 299 finally:
300 300 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
301 301 # call 'filecacheentry.refresh()' here
302 302 f.close()
303 303 self._load(new)
304 304 # new markers *may* have changed several sets. invalidate the caches.
305 305 self.caches.clear()
306 306 return len(new)
307 307
308 308 def mergemarkers(self, transaction, data):
309 309 markers = _readmarkers(data)
310 310 self.add(transaction, markers)
311 311
312 312 def _load(self, markers):
313 313 for mark in markers:
314 314 self._all.append(mark)
315 315 pre, sucs = mark[:2]
316 316 self.successors.setdefault(pre, set()).add(mark)
317 317 for suc in sucs:
318 318 self.precursors.setdefault(suc, set()).add(mark)
319 319 if node.nullid in self.precursors:
320 320 raise util.Abort(_('bad obsolescence marker detected: '
321 321 'invalid successors nullid'))
322 322
323 323 def _encodemarkers(markers, addheader=False):
324 324 # Kept separate from flushmarkers(), it will be reused for
325 325 # markers exchange.
326 326 if addheader:
327 327 yield _pack('>B', _fmversion)
328 328 for marker in markers:
329 329 yield _encodeonemarker(marker)
330 330
331 331
332 332 def _encodeonemarker(marker):
333 333 pre, sucs, flags, metadata = marker
334 334 nbsuc = len(sucs)
335 335 format = _fmfixed + (_fmnode * nbsuc)
336 336 data = [nbsuc, len(metadata), flags, pre]
337 337 data.extend(sucs)
338 338 return _pack(format, *data) + metadata
339 339
340 340 # arbitrarily picked to fit into the 8K limit of HTTP servers
341 341 # you have to take into account:
342 342 # - the version header
343 343 # - the base85 encoding
344 344 _maxpayload = 5300
345 345
346 346 def listmarkers(repo):
347 347 """List markers over pushkey"""
348 348 if not repo.obsstore:
349 349 return {}
350 350 keys = {}
351 351 parts = []
352 352 currentlen = _maxpayload * 2 # ensure we create a new part
353 353 for marker in repo.obsstore:
354 354 nextdata = _encodeonemarker(marker)
355 355 if (len(nextdata) + currentlen > _maxpayload):
356 356 currentpart = []
357 357 currentlen = 0
358 358 parts.append(currentpart)
359 359 currentpart.append(nextdata)
360 360 currentlen += len(nextdata)
361 361 for idx, part in enumerate(reversed(parts)):
362 362 data = ''.join([_pack('>B', _fmversion)] + part)
363 363 keys['dump%i' % idx] = base85.b85encode(data)
364 364 return keys
365 365
366 366 def pushmarker(repo, key, old, new):
367 367 """Push markers over pushkey"""
368 368 if not key.startswith('dump'):
369 369 repo.ui.warn(_('unknown key: %r') % key)
370 370 return 0
371 371 if old:
372 372 repo.ui.warn(_('unexpected old value for %r') % key)
373 373 return 0
374 374 data = base85.b85decode(new)
375 375 lock = repo.lock()
376 376 try:
377 377 tr = repo.transaction('pushkey: obsolete markers')
378 378 try:
379 379 repo.obsstore.mergemarkers(tr, data)
380 380 tr.close()
381 381 return 1
382 382 finally:
383 383 tr.release()
384 384 finally:
385 385 lock.release()
386 386
387 def syncpush(repo, remote):
388 """utility function to push obsolete markers to a remote
389
390 Exists mostly to allow overriding for experimentation purposes"""
391 if (_enabled and repo.obsstore and
392 'obsolete' in remote.listkeys('namespaces')):
393 rslts = []
394 remotedata = repo.listkeys('obsolete')
395 for key in sorted(remotedata, reverse=True):
396 # reverse sort to ensure we end with dump0
397 data = remotedata[key]
398 rslts.append(remote.pushkey('obsolete', key, '', data))
399 if [r for r in rslts if not r]:
400 msg = _('failed to push some obsolete markers!\n')
401 repo.ui.warn(msg)
402
403 387 def syncpull(repo, remote, gettransaction):
404 388 """utility function to pull obsolete markers from a remote
405 389
406 390 The `gettransaction` argument is a function that returns the pull transaction,
407 391 creating one if necessary. We return the transaction to inform the calling code
408 392 that a new transaction has been created (when applicable).
409 393
410 394 Exists mostly to allow overriding for experimentation purposes"""
411 395 tr = None
412 396 if _enabled:
413 397 repo.ui.debug('fetching remote obsolete markers\n')
414 398 remoteobs = remote.listkeys('obsolete')
415 399 if 'dump0' in remoteobs:
416 400 tr = gettransaction()
417 401 for key in sorted(remoteobs, reverse=True):
418 402 if key.startswith('dump'):
419 403 data = base85.b85decode(remoteobs[key])
420 404 repo.obsstore.mergemarkers(tr, data)
421 405 repo.invalidatevolatilesets()
422 406 return tr
423 407
424 408 def allmarkers(repo):
425 409 """all obsolete markers known in a repository"""
426 410 for markerdata in repo.obsstore:
427 411 yield marker(repo, markerdata)
428 412
429 413 def precursormarkers(ctx):
430 414 """obsolete markers marking this changeset as a successor"""
431 415 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
432 416 yield marker(ctx._repo, data)
433 417
434 418 def successormarkers(ctx):
435 419 """obsolete markers making this changeset obsolete"""
436 420 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
437 421 yield marker(ctx._repo, data)
438 422
439 423 def allsuccessors(obsstore, nodes, ignoreflags=0):
440 424 """Yield node for every successor of <nodes>.
441 425
442 426 Some successors may be unknown locally.
443 427
444 428 This is a linear yield unsuited to detecting split changesets. It includes
445 429 initial nodes too."""
446 430 remaining = set(nodes)
447 431 seen = set(remaining)
448 432 while remaining:
449 433 current = remaining.pop()
450 434 yield current
451 435 for mark in obsstore.successors.get(current, ()):
452 436 # ignore marker flagged with specified flag
453 437 if mark[2] & ignoreflags:
454 438 continue
455 439 for suc in mark[1]:
456 440 if suc not in seen:
457 441 seen.add(suc)
458 442 remaining.add(suc)
459 443
460 444 def allprecursors(obsstore, nodes, ignoreflags=0):
461 445 """Yield node for every precursor of <nodes>.
462 446
463 447 Some precursors may be unknown locally.
464 448
465 449 This is a linear yield unsuited to detecting folded changesets. It includes
466 450 initial nodes too."""
467 451
468 452 remaining = set(nodes)
469 453 seen = set(remaining)
470 454 while remaining:
471 455 current = remaining.pop()
472 456 yield current
473 457 for mark in obsstore.precursors.get(current, ()):
474 458 # ignore marker flagged with specified flag
475 459 if mark[2] & ignoreflags:
476 460 continue
477 461 suc = mark[0]
478 462 if suc not in seen:
479 463 seen.add(suc)
480 464 remaining.add(suc)
481 465
482 466 def foreground(repo, nodes):
483 467 """return all nodes in the "foreground" of other nodes
484 468
485 469 The foreground of a revision is anything reachable using parent -> children
486 470 or precursor -> successor relation. It is very similar to "descendant" but
487 471 augmented with obsolescence information.
488 472
489 473 Beware that obsolescence cycles may cause problems in complex situations.
490 474 """
491 475 repo = repo.unfiltered()
492 476 foreground = set(repo.set('%ln::', nodes))
493 477 if repo.obsstore:
494 478 # We only need this complicated logic if there is obsolescence
495 479 # XXX will probably deserve an optimised revset.
496 480 nm = repo.changelog.nodemap
497 481 plen = -1
498 482 # compute the whole set of successors or descendants
499 483 while len(foreground) != plen:
500 484 plen = len(foreground)
501 485 succs = set(c.node() for c in foreground)
502 486 mutable = [c.node() for c in foreground if c.mutable()]
503 487 succs.update(allsuccessors(repo.obsstore, mutable))
504 488 known = (n for n in succs if n in nm)
505 489 foreground = set(repo.set('%ln::', known))
506 490 return set(c.node() for c in foreground)
507 491
508 492
509 493 def successorssets(repo, initialnode, cache=None):
510 494 """Return all set of successors of initial nodes
511 495
512 496 The successors set of a changeset A are a group of revisions that succeed
513 497 A. It succeeds A as a consistent whole, each revision being only a partial
514 498 replacement. The successors set contains non-obsolete changesets only.
515 499
516 500 This function returns the full list of successor sets which is why it
517 501 returns a list of tuples and not just a single tuple. Each tuple is a valid
518 502 successors set. Note that (A,) may be a valid successors set for changeset A
519 503 (see below).
520 504
521 505 In most cases, a changeset A will have a single element (e.g. the changeset
522 506 A is replaced by A') in its successors set. Though, it is also common for a
523 507 changeset A to have no elements in its successor set (e.g. the changeset
524 508 has been pruned). Therefore, the returned list of successors sets will be
525 509 [(A',)] or [], respectively.
526 510
527 511 When a changeset A is split into A' and B', however, it will result in a
528 512 successors set containing more than a single element, i.e. [(A',B')].
529 513 Divergent changesets will result in multiple successors sets, i.e. [(A',),
530 514 (A'')].
531 515
532 516 If a changeset A is not obsolete, then it will conceptually have no
533 517 successors set. To distinguish this from a pruned changeset, the successor
534 518 set will only contain itself, i.e. [(A,)].
535 519
536 520 Finally, successors unknown locally are considered to be pruned (obsoleted
537 521 without any successors).
538 522
539 523 The optional `cache` parameter is a dictionary that may contain precomputed
540 524 successors sets. It is meant to reuse the computation of a previous call to
541 525 `successorssets` when multiple calls are made at the same time. The cache
542 526 dictionary is updated in place. The caller is responsible for its life
543 527 span. Code that makes multiple calls to `successorssets` *must* use this
544 528 cache mechanism or suffer terrible performance.
545 529
546 530 """
547 531
548 532 succmarkers = repo.obsstore.successors
549 533
550 534 # Stack of nodes we search successors sets for
551 535 toproceed = [initialnode]
552 536 # set version of above list for fast loop detection
553 537 # element added to "toproceed" must be added here
554 538 stackedset = set(toproceed)
555 539 if cache is None:
556 540 cache = {}
557 541
558 542 # This while loop is the flattened version of a recursive search for
559 543 # successors sets
560 544 #
561 545 # def successorssets(x):
562 546 # successors = directsuccessors(x)
563 547 # ss = [[]]
564 548 # for succ in directsuccessors(x):
565 549 # # product as in itertools cartesian product
566 550 # ss = product(ss, successorssets(succ))
567 551 # return ss
568 552 #
569 553 # But we can not use plain recursive calls here:
570 554 # - that would blow the python call stack
571 555 # - obsolescence markers may have cycles, we need to handle them.
572 556 #
573 557 # The `toproceed` list acts as our call stack. Every node we search
574 558 # successors sets for is stacked there.
575 559 #
576 560 # The `stackedset` is the set version of this stack, used to check if a node
577 561 # is already stacked. This check is used to detect cycles and prevent
578 562 # infinite loops.
579 563 #
580 564 # successors sets of all nodes are stored in the `cache` dictionary.
581 565 #
582 566 # After this while loop ends we use the cache to return the successors sets
583 567 # for the node requested by the caller.
584 568 while toproceed:
585 569 # Every iteration tries to compute the successors sets of the topmost
586 570 # node of the stack: CURRENT.
587 571 #
588 572 # There are four possible outcomes:
589 573 #
590 574 # 1) We already know the successors sets of CURRENT:
591 575 # -> mission accomplished, pop it from the stack.
592 576 # 2) Node is not obsolete:
593 577 # -> the node is its own successors sets. Add it to the cache.
594 578 # 3) We do not know successors set of direct successors of CURRENT:
595 579 # -> We add those successors to the stack.
596 580 # 4) We know successors sets of all direct successors of CURRENT:
597 581 # -> We can compute CURRENT successors set and add it to the
598 582 # cache.
599 583 #
600 584 current = toproceed[-1]
601 585 if current in cache:
602 586 # case (1): We already know the successors sets
603 587 stackedset.remove(toproceed.pop())
604 588 elif current not in succmarkers:
605 589 # case (2): The node is not obsolete.
606 590 if current in repo:
607 591 # We have a valid last successor.
608 592 cache[current] = [(current,)]
609 593 else:
610 594 # Final obsolete version is unknown locally.
611 595 # Do not count that as a valid successors
612 596 cache[current] = []
613 597 else:
614 598 # cases (3) and (4)
615 599 #
616 600 # We proceed in two phases. Phase 1 aims to distinguish case (3)
617 601 # from case (4):
618 602 #
619 603 # For each direct successors of CURRENT, we check whether its
620 604 # successors sets are known. If they are not, we stack the
621 605 # unknown node and proceed to the next iteration of the while
622 606 # loop. (case 3)
623 607 #
624 608 # During this step, we may detect obsolescence cycles: a node
625 609 # with unknown successors sets but already in the call stack.
626 610 # In such a situation, we arbitrarily set the successors sets of
627 611 # the node to nothing (node pruned) to break the cycle.
628 612 #
629 613 # If no break was encountered we proceed to phase 2.
630 614 #
631 615 # Phase 2 computes successors sets of CURRENT (case 4); see details
632 616 # in phase 2 itself.
633 617 #
634 618 # Note the two levels of iteration in each phase.
635 619 # - The first one handles obsolescence markers using CURRENT as
636 620 # precursor (successors markers of CURRENT).
637 621 #
638 622 # Having multiple entry here means divergence.
639 623 #
640 624 # - The second one handles successors defined in each marker.
641 625 #
642 626 # Having none means pruned node, multiple successors means split,
643 627 # single successors are standard replacement.
644 628 #
645 629 for mark in sorted(succmarkers[current]):
646 630 for suc in mark[1]:
647 631 if suc not in cache:
648 632 if suc in stackedset:
649 633 # cycle breaking
650 634 cache[suc] = []
651 635 else:
652 636 # case (3) If we have not computed successors sets
653 637 # of one of those successors we add it to the
654 638 # `toproceed` stack and stop all work for this
655 639 # iteration.
656 640 toproceed.append(suc)
657 641 stackedset.add(suc)
658 642 break
659 643 else:
660 644 continue
661 645 break
662 646 else:
663 647 # case (4): we know all successors sets of all direct
664 648 # successors
665 649 #
666 650 # Successors set contributed by each marker depends on the
667 651 # successors sets of all its "successors" node.
668 652 #
669 653 # Each different marker is a divergence in the obsolescence
670 654 # history. It contributes successors sets distinct from other
671 655 # markers.
672 656 #
673 657 # Within a marker, a successor may have divergent successors
674 658 # sets. In such a case, the marker will contribute multiple
675 659 # divergent successors sets. If multiple successors have
676 660 # divergent successors sets, a cartesian product is used.
677 661 #
678 662 # At the end we post-process successors sets to remove
679 663 # duplicated entry and successors set that are strict subset of
680 664 # another one.
681 665 succssets = []
682 666 for mark in sorted(succmarkers[current]):
683 667 # successors sets contributed by this marker
684 668 markss = [[]]
685 669 for suc in mark[1]:
686 670 # cartesian product with previous successors
687 671 productresult = []
688 672 for prefix in markss:
689 673 for suffix in cache[suc]:
690 674 newss = list(prefix)
691 675 for part in suffix:
692 676 # do not duplicate entries in a successors set;
693 677 # the first entry wins.
694 678 if part not in newss:
695 679 newss.append(part)
696 680 productresult.append(newss)
697 681 markss = productresult
698 682 succssets.extend(markss)
699 683 # remove duplicates and subsets
700 684 seen = []
701 685 final = []
702 686 candidate = sorted(((set(s), s) for s in succssets if s),
703 687 key=lambda x: len(x[1]), reverse=True)
704 688 for setversion, listversion in candidate:
705 689 for seenset in seen:
706 690 if setversion.issubset(seenset):
707 691 break
708 692 else:
709 693 final.append(listversion)
710 694 seen.append(setversion)
711 695 final.reverse() # put small successors set first
712 696 cache[current] = final
713 697 return cache[initialnode]
714 698
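An editorial note on the cache contract above, not part of the patch: callers that look up several nodes must share a single cache dictionary, as `_computedivergentset` below does with its `newermap`. A hedged usage sketch (`nodesofinterest` and `process` are illustrative):

    cache = {}  # shared across calls; updated in place by successorssets
    for node in nodesofinterest:
        for sset in successorssets(repo, node, cache):
            # each sset is a tuple of successor nodes; [] means pruned,
            # [(node,)] means the changeset is not obsolete
            process(sset)
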
715 699 def _knownrevs(repo, nodes):
716 700 """yield revision numbers of known nodes passed in parameters
717 701
718 702 Unknown revisions are silently ignored."""
719 703 torev = repo.changelog.nodemap.get
720 704 for n in nodes:
721 705 rev = torev(n)
722 706 if rev is not None:
723 707 yield rev
724 708
725 709 # mapping of 'set-name' -> <function to compute this set>
726 710 cachefuncs = {}
727 711 def cachefor(name):
728 712 """Decorator to register a function as computing the cache for a set"""
729 713 def decorator(func):
730 714 assert name not in cachefuncs
731 715 cachefuncs[name] = func
732 716 return func
733 717 return decorator
734 718
735 719 def getrevs(repo, name):
736 720 """Return the set of revisions that belong to the <name> set
737 721
738 722 Such access may compute the set and cache it for future use"""
739 723 repo = repo.unfiltered()
740 724 if not repo.obsstore:
741 725 return ()
742 726 if name not in repo.obsstore.caches:
743 727 repo.obsstore.caches[name] = cachefuncs[name](repo)
744 728 return repo.obsstore.caches[name]
745 729
746 730 # To be simple we need to invalidate obsolescence caches when:
747 731 #
748 732 # - a new changeset is added
749 733 # - public phase is changed
750 734 # - obsolescence markers are added
751 735 # - strip is used on a repo
752 736 def clearobscaches(repo):
753 737 """Remove all obsolescence related cache from a repo
754 738
755 739 This removes all caches in the obsstore if the obsstore already exists on the
756 740 repo.
757 741
758 742 (We could be smarter here given the exact events that trigger the cache
759 743 clearing)"""
760 744 # only clear caches if there is obsstore data in this repo
761 745 if 'obsstore' in repo._filecache:
762 746 repo.obsstore.caches.clear()
763 747
764 748 @cachefor('obsolete')
765 749 def _computeobsoleteset(repo):
766 750 """the set of obsolete revisions"""
767 751 obs = set()
768 752 getrev = repo.changelog.nodemap.get
769 753 getphase = repo._phasecache.phase
770 754 for node in repo.obsstore.successors:
771 755 rev = getrev(node)
772 756 if rev is not None and getphase(repo, rev):
773 757 obs.add(rev)
774 758 return obs
775 759
776 760 @cachefor('unstable')
777 761 def _computeunstableset(repo):
778 762 """the set of non obsolete revisions with obsolete parents"""
779 763 # revset is not efficient enough here
780 764 # we do (obsolete()::) - obsolete() by hand
781 765 obs = getrevs(repo, 'obsolete')
782 766 if not obs:
783 767 return set()
784 768 cl = repo.changelog
785 769 return set(r for r in cl.descendants(obs) if r not in obs)
786 770
787 771 @cachefor('suspended')
788 772 def _computesuspendedset(repo):
789 773 """the set of obsolete parents with non obsolete descendants"""
790 774 suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))
791 775 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
792 776
793 777 @cachefor('extinct')
794 778 def _computeextinctset(repo):
795 779 """the set of obsolete parents without non obsolete descendants"""
796 780 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
797 781
798 782
799 783 @cachefor('bumped')
800 784 def _computebumpedset(repo):
801 785 """the set of revs trying to obsolete public revisions"""
802 786 bumped = set()
803 787 # util functions (avoid attribute lookups in the loop)
804 788 phase = repo._phasecache.phase # would be faster to grab the full list
805 789 public = phases.public
806 790 cl = repo.changelog
807 791 torev = cl.nodemap.get
808 792 obs = getrevs(repo, 'obsolete')
809 793 for rev in repo:
810 794 # We only evaluate mutable, non-obsolete revisions
811 795 if (public < phase(repo, rev)) and (rev not in obs):
812 796 node = cl.node(rev)
813 797 # (future) A cache of precursors may be worth it if split is very common
814 798 for pnode in allprecursors(repo.obsstore, [node],
815 799 ignoreflags=bumpedfix):
816 800 prev = torev(pnode) # unfiltered! but so is phasecache
817 801 if (prev is not None) and (phase(repo, prev) <= public):
818 802 # we have a public precursor
819 803 bumped.add(rev)
820 804 break # Next draft!
821 805 return bumped
822 806
823 807 @cachefor('divergent')
824 808 def _computedivergentset(repo):
825 809 """the set of revs that compete to be the final successors of some revision.
826 810 """
827 811 divergent = set()
828 812 obsstore = repo.obsstore
829 813 newermap = {}
830 814 for ctx in repo.set('(not public()) - obsolete()'):
831 815 mark = obsstore.precursors.get(ctx.node(), ())
832 816 toprocess = set(mark)
833 817 while toprocess:
834 818 prec = toprocess.pop()[0]
835 819 if prec not in newermap:
836 820 successorssets(repo, prec, newermap)
837 821 newer = [n for n in newermap[prec] if n]
838 822 if len(newer) > 1:
839 823 divergent.add(ctx.rev())
840 824 break
841 825 toprocess.update(obsstore.precursors.get(prec, ()))
842 826 return divergent
843 827
844 828
845 829 def createmarkers(repo, relations, flag=0, metadata=None):
846 830 """Add obsolete markers between changesets in a repo
847 831
848 832 <relations> must be an iterable of (<old>, (<new>, ...)) tuples.
849 833 `old` and `news` are changectx.
850 834
851 835 Trying to obsolete a public changeset will raise an exception.
852 836
853 837 Current user and date are used except if specified otherwise in the
854 838 metadata attribute.
855 839
856 840 This function operates within a transaction of its own, but does
857 841 not take any lock on the repo.
858 842 """
859 843 # prepare metadata
860 844 if metadata is None:
861 845 metadata = {}
862 846 if 'date' not in metadata:
863 847 metadata['date'] = '%i %i' % util.makedate()
864 848 if 'user' not in metadata:
865 849 metadata['user'] = repo.ui.username()
866 850 tr = repo.transaction('add-obsolescence-marker')
867 851 try:
868 852 for prec, sucs in relations:
869 853 if not prec.mutable():
870 854 raise util.Abort("cannot obsolete immutable changeset: %s"
871 855 % prec)
872 856 nprec = prec.node()
873 857 nsucs = tuple(s.node() for s in sucs)
874 858 if nprec in nsucs:
875 859 raise util.Abort("changeset %s cannot obsolete itself" % prec)
876 860 repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
877 861 repo.filteredrevcache.clear()
878 862 tr.close()
879 863 finally:
880 864 tr.release()
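
To close, a hedged usage sketch of `createmarkers`, the high-level entry point above (the helper function name and metadata values are illustrative; the obsolete feature must be enabled via `_enabled` for the underlying obsstore.add to accept markers):

    from mercurial import obsolete

    def markreplaced(repo, old, new):
        # record that changectx `old` was rewritten into changectx `new`;
        # an empty successor tuple such as (old, ()) would mean a prune
        obsolete.createmarkers(repo, [(old, (new,))],
                               metadata={'user': 'alice <alice@example.com>'})
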