bundlerepo: implement safe phasecache...
Eric Sumner
r23631:b8260abf default
mercurial/bundlerepo.py
@@ -1,403 +1,424
1 1 # bundlerepo.py - repository class for viewing uncompressed bundles
2 2 #
3 3 # Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Repository class for viewing uncompressed bundles.
9 9
10 10 This provides a read-only repository interface to bundles as if they
11 11 were part of the actual repository.
12 12 """
13 13
14 14 from node import nullid
15 15 from i18n import _
16 16 import os, tempfile, shutil
17 17 import changegroup, util, mdiff, discovery, cmdutil, scmutil, exchange
18 import localrepo, changelog, manifest, filelog, revlog, error
18 import localrepo, changelog, manifest, filelog, revlog, error, phases
19 19
20 20 class bundlerevlog(revlog.revlog):
21 21 def __init__(self, opener, indexfile, bundle, linkmapper):
22 22 # How it works:
23 23 # To retrieve a revision, we need to know the offset of the revision in
24 24 # the bundle (an unbundle object). We store this offset in the index
25 25 # (start). The base of the delta is stored in the base field.
26 26 #
27 27 # To differentiate a rev in the bundle from a rev in the revlog, we
28 28 # check revision against repotiprev.
29 29 opener = scmutil.readonlyvfs(opener)
30 30 revlog.revlog.__init__(self, opener, indexfile)
31 31 self.bundle = bundle
32 32 n = len(self)
33 33 self.repotiprev = n - 1
34 34 chain = None
35 35 self.bundlerevs = set() # used by 'bundle()' revset expression
36 36 while True:
37 37 chunkdata = bundle.deltachunk(chain)
38 38 if not chunkdata:
39 39 break
40 40 node = chunkdata['node']
41 41 p1 = chunkdata['p1']
42 42 p2 = chunkdata['p2']
43 43 cs = chunkdata['cs']
44 44 deltabase = chunkdata['deltabase']
45 45 delta = chunkdata['delta']
46 46
47 47 size = len(delta)
48 48 start = bundle.tell() - size
49 49
50 50 link = linkmapper(cs)
51 51 if node in self.nodemap:
52 52 # this can happen if two branches make the same change
53 53 chain = node
54 54 self.bundlerevs.add(self.nodemap[node])
55 55 continue
56 56
57 57 for p in (p1, p2):
58 58 if p not in self.nodemap:
59 59 raise error.LookupError(p, self.indexfile,
60 60 _("unknown parent"))
61 61
62 62 if deltabase not in self.nodemap:
63 63 raise error.LookupError(deltabase, self.indexfile,
64 64 _('unknown delta base'))
65 65
66 66 baserev = self.rev(deltabase)
67 67 # start, size, full unc. size, base (unused), link, p1, p2, node
68 68 e = (revlog.offset_type(start, 0), size, -1, baserev, link,
69 69 self.rev(p1), self.rev(p2), node)
70 70 self.index.insert(-1, e)
71 71 self.nodemap[node] = n
72 72 self.bundlerevs.add(n)
73 73 chain = node
74 74 n += 1
75 75
76 76 def _chunk(self, rev):
77 77 # Warning: in the case of a bundle, the diff is against what we stored as
78 78 # the delta base, not against rev - 1
79 79 # XXX: could use some caching
80 80 if rev <= self.repotiprev:
81 81 return revlog.revlog._chunk(self, rev)
82 82 self.bundle.seek(self.start(rev))
83 83 return self.bundle.read(self.length(rev))
84 84
85 85 def revdiff(self, rev1, rev2):
86 86 """return or calculate a delta between two revisions"""
87 87 if rev1 > self.repotiprev and rev2 > self.repotiprev:
88 88 # hot path for bundle
89 89 revb = self.index[rev2][3]
90 90 if revb == rev1:
91 91 return self._chunk(rev2)
92 92 elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
93 93 return revlog.revlog.revdiff(self, rev1, rev2)
94 94
95 95 return mdiff.textdiff(self.revision(self.node(rev1)),
96 96 self.revision(self.node(rev2)))
97 97
98 98 def revision(self, nodeorrev):
99 99 """return an uncompressed revision of a given node or revision
100 100 number.
101 101 """
102 102 if isinstance(nodeorrev, int):
103 103 rev = nodeorrev
104 104 node = self.node(rev)
105 105 else:
106 106 node = nodeorrev
107 107 rev = self.rev(node)
108 108
109 109 if node == nullid:
110 110 return ""
111 111
112 112 text = None
113 113 chain = []
114 114 iterrev = rev
115 115 # reconstruct the revision if it is from a changegroup
116 116 while iterrev > self.repotiprev:
117 117 if self._cache and self._cache[1] == iterrev:
118 118 text = self._cache[2]
119 119 break
120 120 chain.append(iterrev)
121 121 iterrev = self.index[iterrev][3]
122 122 if text is None:
123 123 text = self.baserevision(iterrev)
124 124
125 125 while chain:
126 126 delta = self._chunk(chain.pop())
127 127 text = mdiff.patches(text, [delta])
128 128
129 129 self._checkhash(text, node, rev)
130 130 self._cache = (node, rev, text)
131 131 return text
132 132
133 133 def baserevision(self, nodeorrev):
134 134 # Revlog subclasses may override the 'revision' method to modify the format
135 135 # of content retrieved from the revlog. To use bundlerevlog with such a
136 136 # class, one needs to override 'baserevision' and make a more specific call here.
137 137 return revlog.revlog.revision(self, nodeorrev)
138 138
139 139 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
140 140 raise NotImplementedError
141 141 def addgroup(self, revs, linkmapper, transaction):
142 142 raise NotImplementedError
143 143 def strip(self, rev, minlink):
144 144 raise NotImplementedError
145 145 def checksize(self):
146 146 raise NotImplementedError
147 147
148 148 class bundlechangelog(bundlerevlog, changelog.changelog):
149 149 def __init__(self, opener, bundle):
150 150 changelog.changelog.__init__(self, opener)
151 151 linkmapper = lambda x: x
152 152 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
153 153 linkmapper)
154 154
155 155 def baserevision(self, nodeorrev):
156 156 # Although changelog doesn't override 'revision' method, some extensions
157 157 # may replace this class with another that does. Same story with
158 158 # manifest and filelog classes.
159 159 return changelog.changelog.revision(self, nodeorrev)
160 160
161 161 class bundlemanifest(bundlerevlog, manifest.manifest):
162 162 def __init__(self, opener, bundle, linkmapper):
163 163 manifest.manifest.__init__(self, opener)
164 164 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
165 165 linkmapper)
166 166
167 167 def baserevision(self, nodeorrev):
168 168 return manifest.manifest.revision(self, nodeorrev)
169 169
170 170 class bundlefilelog(bundlerevlog, filelog.filelog):
171 171 def __init__(self, opener, path, bundle, linkmapper, repo):
172 172 filelog.filelog.__init__(self, opener, path)
173 173 bundlerevlog.__init__(self, opener, self.indexfile, bundle,
174 174 linkmapper)
175 175 self._repo = repo
176 176
177 177 def baserevision(self, nodeorrev):
178 178 return filelog.filelog.revision(self, nodeorrev)
179 179
180 180 def _file(self, f):
181 181 return self._repo.file(f)
182 182
183 183 class bundlepeer(localrepo.localpeer):
184 184 def canpush(self):
185 185 return False
186 186
187 class bundlephasecache(phases.phasecache):
188 def __init__(self, *args, **kwargs):
189 super(bundlephasecache, self).__init__(*args, **kwargs)
190 if util.safehasattr(self, 'opener'):
191 self.opener = scmutil.readonlyvfs(self.opener)
192
193 def write(self):
194 raise NotImplementedError
195
196 def _write(self, fp):
197 raise NotImplementedError
198
199 def _updateroots(self, phase, newroots, tr):
200 self.phaseroots[phase] = newroots
201 self.invalidate()
202 self.dirty = True
203
187 204 class bundlerepository(localrepo.localrepository):
188 205 def __init__(self, ui, path, bundlename):
189 206 self._tempparent = None
190 207 try:
191 208 localrepo.localrepository.__init__(self, ui, path)
192 209 except error.RepoError:
193 210 self._tempparent = tempfile.mkdtemp()
194 211 localrepo.instance(ui, self._tempparent, 1)
195 212 localrepo.localrepository.__init__(self, ui, self._tempparent)
196 213 self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
197 214
198 215 if path:
199 216 self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
200 217 else:
201 218 self._url = 'bundle:' + bundlename
202 219
203 220 self.tempfile = None
204 221 f = util.posixfile(bundlename, "rb")
205 222 self.bundle = exchange.readbundle(ui, f, bundlename)
206 223 if self.bundle.compressed():
207 224 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
208 225 suffix=".hg10un")
209 226 self.tempfile = temp
210 227 fptemp = os.fdopen(fdtemp, 'wb')
211 228
212 229 try:
213 230 fptemp.write("HG10UN")
214 231 while True:
215 232 chunk = self.bundle.read(2**18)
216 233 if not chunk:
217 234 break
218 235 fptemp.write(chunk)
219 236 finally:
220 237 fptemp.close()
221 238
222 239 f = self.vfs.open(self.tempfile, mode="rb")
223 240 self.bundle = exchange.readbundle(ui, f, bundlename, self.vfs)
224 241
225 242 # dict with the mapping 'filename' -> position in the bundle
226 243 self.bundlefilespos = {}
227 244
228 245 @localrepo.unfilteredpropertycache
246 def _phasecache(self):
247 return bundlephasecache(self, self._phasedefaults)
248
249 @localrepo.unfilteredpropertycache
229 250 def changelog(self):
230 251 # consume the header if it exists
231 252 self.bundle.changelogheader()
232 253 c = bundlechangelog(self.sopener, self.bundle)
233 254 self.manstart = self.bundle.tell()
234 255 return c
235 256
236 257 @localrepo.unfilteredpropertycache
237 258 def manifest(self):
238 259 self.bundle.seek(self.manstart)
239 260 # consume the header if it exists
240 261 self.bundle.manifestheader()
241 262 m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
242 263 self.filestart = self.bundle.tell()
243 264 return m
244 265
245 266 @localrepo.unfilteredpropertycache
246 267 def manstart(self):
247 268 self.changelog
248 269 return self.manstart
249 270
250 271 @localrepo.unfilteredpropertycache
251 272 def filestart(self):
252 273 self.manifest
253 274 return self.filestart
254 275
255 276 def url(self):
256 277 return self._url
257 278
258 279 def file(self, f):
259 280 if not self.bundlefilespos:
260 281 self.bundle.seek(self.filestart)
261 282 while True:
262 283 chunkdata = self.bundle.filelogheader()
263 284 if not chunkdata:
264 285 break
265 286 fname = chunkdata['filename']
266 287 self.bundlefilespos[fname] = self.bundle.tell()
267 288 while True:
268 289 c = self.bundle.deltachunk(None)
269 290 if not c:
270 291 break
271 292
272 293 if f in self.bundlefilespos:
273 294 self.bundle.seek(self.bundlefilespos[f])
274 295 return bundlefilelog(self.sopener, f, self.bundle,
275 296 self.changelog.rev, self)
276 297 else:
277 298 return filelog.filelog(self.sopener, f)
278 299
279 300 def close(self):
280 301 """Close assigned bundle file immediately."""
281 302 self.bundle.close()
282 303 if self.tempfile is not None:
283 304 self.vfs.unlink(self.tempfile)
284 305 if self._tempparent:
285 306 shutil.rmtree(self._tempparent, True)
286 307
287 308 def cancopy(self):
288 309 return False
289 310
290 311 def peer(self):
291 312 return bundlepeer(self)
292 313
293 314 def getcwd(self):
294 315 return os.getcwd() # always outside the repo
295 316
296 317
297 318 def instance(ui, path, create):
298 319 if create:
299 320 raise util.Abort(_('cannot create new bundle repository'))
300 321 parentpath = ui.config("bundle", "mainreporoot", "")
301 322 if not parentpath:
302 323 # try to find the correct path to the working directory repo
303 324 parentpath = cmdutil.findrepo(os.getcwd())
304 325 if parentpath is None:
305 326 parentpath = ''
306 327 if parentpath:
307 328 # Try to make the full path relative so we get a nice, short URL.
308 329 # In particular, we don't want temp dir names in test outputs.
309 330 cwd = os.getcwd()
310 331 if parentpath == cwd:
311 332 parentpath = ''
312 333 else:
313 334 cwd = os.path.join(cwd,'')
314 335 if parentpath.startswith(cwd):
315 336 parentpath = parentpath[len(cwd):]
316 337 u = util.url(path)
317 338 path = u.localpath()
318 339 if u.scheme == 'bundle':
319 340 s = path.split("+", 1)
320 341 if len(s) == 1:
321 342 repopath, bundlename = parentpath, s[0]
322 343 else:
323 344 repopath, bundlename = s
324 345 else:
325 346 repopath, bundlename = parentpath, path
326 347 return bundlerepository(ui, repopath, bundlename)
327 348
328 349 def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
329 350 force=False):
330 351 '''obtains a bundle of changes incoming from other
331 352
332 353 "onlyheads" restricts the returned changes to those reachable from the
333 354 specified heads.
334 355 "bundlename", if given, stores the bundle to this file path permanently;
335 356 otherwise it's stored to a temp file and gets deleted again when you call
336 357 the returned "cleanupfn".
337 358 "force" indicates whether to proceed on unrelated repos.
338 359
339 360 Returns a tuple (local, csets, cleanupfn):
340 361
341 362 "local" is a local repo from which to obtain the actual incoming
342 363 changesets; it is a bundlerepo for the obtained bundle when the
343 364 original "other" is remote.
344 365 "csets" lists the incoming changeset node ids.
345 366 "cleanupfn" must be called without arguments when you're done processing
346 367 the changes; it closes both the original "other" and the one returned
347 368 here.
348 369 '''
349 370 tmp = discovery.findcommonincoming(repo, other, heads=onlyheads,
350 371 force=force)
351 372 common, incoming, rheads = tmp
352 373 if not incoming:
353 374 try:
354 375 if bundlename:
355 376 os.unlink(bundlename)
356 377 except OSError:
357 378 pass
358 379 return repo, [], other.close
359 380
360 381 commonset = set(common)
361 382 rheads = [x for x in rheads if x not in commonset]
362 383
363 384 bundle = None
364 385 bundlerepo = None
365 386 localrepo = other.local()
366 387 if bundlename or not localrepo:
367 388 # create a bundle (uncompressed if other repo is not local)
368 389
369 390 if other.capable('getbundle'):
370 391 cg = other.getbundle('incoming', common=common, heads=rheads)
371 392 elif onlyheads is None and not other.capable('changegroupsubset'):
372 393 # compat with older servers when pulling all remote heads
373 394 cg = other.changegroup(incoming, "incoming")
374 395 rheads = None
375 396 else:
376 397 cg = other.changegroupsubset(incoming, rheads, 'incoming')
377 398 bundletype = localrepo and "HG10BZ" or "HG10UN"
378 399 fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
379 400 # keep written bundle?
380 401 if bundlename:
381 402 bundle = None
382 403 if not localrepo:
383 404 # use the created uncompressed bundlerepo
384 405 localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
385 406 fname)
386 407 # this repo contains local and other now, so filter out local again
387 408 common = repo.heads()
388 409 if localrepo:
389 410 # Part of common may be remotely filtered
390 411 # So use an unfiltered version
391 412 # The discovery process probably needs cleanup to avoid that
392 413 localrepo = localrepo.unfiltered()
393 414
394 415 csets = localrepo.changelog.findmissing(common, rheads)
395 416
396 417 def cleanup():
397 418 if bundlerepo:
398 419 bundlerepo.close()
399 420 if bundle:
400 421 os.unlink(bundle)
401 422 other.close()
402 423
403 424 return (localrepo, csets, cleanup)
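The bundlephasecache introduced above gives the overlay repository an in-memory phase store: write() and _write() are disabled, and _updateroots() records the new roots and marks the cache dirty without needing a transaction, so computing phases for changesets that only exist in the bundle can never touch the underlying repository's phaseroots file. A minimal usage sketch (illustrative only; the repository path and bundle file name are hypothetical):

    # Python 2, Mercurial 3.2-era API
    from mercurial import ui as uimod, bundlerepo, phases

    u = uimod.ui()
    # instance() parses 'bundle:<repopath>+<bundlename>' URLs
    repo = bundlerepo.instance(u, 'bundle:/path/to/repo+incoming.hg', False)
    try:
        for rev in repo:
            ctx = repo[rev]
            # ctx.phase() consults repo._phasecache, which is now a
            # bundlephasecache, so nothing is ever written back to .hg/store
            print rev, ctx.hex()[:12], phases.phasenames[ctx.phase()]
    finally:
        repo.close()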
mercurial/phases.py
@@ -1,445 +1,445
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase are described
22 22 below; here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset can not be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34             immutable  shared
35 35   public:       X         X
36 36   draft:                  X
37 37   secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set; we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of facts/rules defines the exchange of phases:
50 50
51 51 * old client never changes server states
52 52 * pull never changes server states
53 53 * changesets from publishing or old servers are seen as public by the client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60                            server
61 61               old        publish      non-publish
62 62             N    X    N    D    P    N    D    P
63 63   old client
64 64   pull
65 65     N       -    X/X  -    X/D  X/P  -    X/D  X/P
66 66     X       -    X/X  -    X/D  X/P  -    X/D  X/P
67 67   push
68 68     X       X/X  X/X  X/P  X/P  X/P  X/D  X/D  X/P
69 69   new client
70 70   pull
71 71     N       -    P/X  -    P/D  P/P  -    D/D  P/P
72 72     D       -    P/X  -    P/D  P/P  -    D/D  P/P
73 73     P       -    P/X  -    P/D  P/P  -    P/D  P/P
74 74   push
75 75     D       P/X  P/X  P/P  P/P  P/P  D/D  D/D  P/P
76 76     P       P/X  P/X  P/P  P/P  P/P  P/P  P/P  P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: an old client behaves as a publishing server with draft-only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
103 103 import os
104 104 import errno
105 105 from node import nullid, nullrev, bin, hex, short
106 106 from i18n import _
107 107 import util, error
108 108
109 109 allphases = public, draft, secret = range(3)
110 110 trackedphases = allphases[1:]
111 111 phasenames = ['public', 'draft', 'secret']
112 112
113 113 def _readroots(repo, phasedefaults=None):
114 114 """Read phase roots from disk
115 115
116 116 phasedefaults is a list of fn(repo, roots) callables, which are
117 117 executed if the phase roots file does not exist. When phases are
118 118 being initialized on an existing repository, this could be used to
119 119 set selected changesets' phase to something other than public.
120 120
121 121 Return (roots, dirty) where dirty is true if roots differ from
122 122 what is being stored.
123 123 """
124 124 repo = repo.unfiltered()
125 125 dirty = False
126 126 roots = [set() for i in allphases]
127 127 try:
128 128 f = None
129 129 if 'HG_PENDING' in os.environ:
130 130 try:
131 131 f = repo.svfs('phaseroots.pending')
132 132 except IOError, inst:
133 133 if inst.errno != errno.ENOENT:
134 134 raise
135 135 if f is None:
136 136 f = repo.sopener('phaseroots')
137 137 try:
138 138 for line in f:
139 139 phase, nh = line.split()
140 140 roots[int(phase)].add(bin(nh))
141 141 finally:
142 142 f.close()
143 143 except IOError, inst:
144 144 if inst.errno != errno.ENOENT:
145 145 raise
146 146 if phasedefaults:
147 147 for f in phasedefaults:
148 148 roots = f(repo, roots)
149 149 dirty = True
150 150 return roots, dirty
151 151
152 152 class phasecache(object):
153 153 def __init__(self, repo, phasedefaults, _load=True):
154 154 if _load:
155 155 # Cheap trick to allow shallow-copy without copy module
156 156 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
157 157 self._phaserevs = None
158 158 self.filterunknown(repo)
159 159 self.opener = repo.sopener
160 160
161 161 def copy(self):
162 162 # Shallow copy meant to ensure isolation in
163 163 # advance/retractboundary(), nothing more.
164 ph = phasecache(None, None, _load=False)
164 ph = self.__class__(None, None, _load=False)
165 165 ph.phaseroots = self.phaseroots[:]
166 166 ph.dirty = self.dirty
167 167 ph.opener = self.opener
168 168 ph._phaserevs = self._phaserevs
169 169 return ph
170 170
171 171 def replace(self, phcache):
172 172 for a in 'phaseroots dirty opener _phaserevs'.split():
173 173 setattr(self, a, getattr(phcache, a))
174 174
175 175 def getphaserevs(self, repo):
176 176 if self._phaserevs is None:
177 177 repo = repo.unfiltered()
178 178 revs = [public] * len(repo.changelog)
179 179 self._phaserevs = revs
180 180 self._populatephaseroots(repo)
181 181 for phase in trackedphases:
182 182 roots = map(repo.changelog.rev, self.phaseroots[phase])
183 183 if roots:
184 184 for rev in roots:
185 185 revs[rev] = phase
186 186 for rev in repo.changelog.descendants(roots):
187 187 revs[rev] = phase
188 188 return self._phaserevs
189 189
190 190 def invalidate(self):
191 191 self._phaserevs = None
192 192
193 193 def _populatephaseroots(self, repo):
194 194 """Fills the _phaserevs cache with phases for the roots.
195 195 """
196 196 cl = repo.changelog
197 197 phaserevs = self._phaserevs
198 198 for phase in trackedphases:
199 199 roots = map(cl.rev, self.phaseroots[phase])
200 200 for root in roots:
201 201 phaserevs[root] = phase
202 202
203 203 def phase(self, repo, rev):
204 204 # We need a repo argument here to be able to build _phaserevs
205 205 # if necessary. The repository instance is not stored in
206 206 # phasecache to avoid reference cycles. The changelog instance
207 207 # is not stored because it is a filecache() property and can
208 208 # be replaced without us being notified.
209 209 if rev == nullrev:
210 210 return public
211 211 if rev < nullrev:
212 212 raise ValueError(_('cannot lookup negative revision'))
213 213 if self._phaserevs is None or rev >= len(self._phaserevs):
214 214 self.invalidate()
215 215 self._phaserevs = self.getphaserevs(repo)
216 216 return self._phaserevs[rev]
217 217
218 218 def write(self):
219 219 if not self.dirty:
220 220 return
221 221 f = self.opener('phaseroots', 'w', atomictemp=True)
222 222 try:
223 223 self._write(f)
224 224 finally:
225 225 f.close()
226 226
227 227 def _write(self, fp):
228 228 for phase, roots in enumerate(self.phaseroots):
229 229 for h in roots:
230 230 fp.write('%i %s\n' % (phase, hex(h)))
231 231 self.dirty = False
232 232
233 233 def _updateroots(self, phase, newroots, tr):
234 234 self.phaseroots[phase] = newroots
235 235 self.invalidate()
236 236 self.dirty = True
237 237
238 238 tr.addfilegenerator('phase', ('phaseroots',), self._write)
239 239 tr.hookargs['phases_moved'] = '1'
240 240
241 241 def advanceboundary(self, repo, tr, targetphase, nodes):
242 242 # Be careful to preserve shallow-copied values: do not update
243 243 # phaseroots values, replace them.
244 244
245 245 repo = repo.unfiltered()
246 246 delroots = [] # set of root deleted by this path
247 247 for phase in xrange(targetphase + 1, len(allphases)):
248 248 # filter nodes that are not in a compatible phase already
249 249 nodes = [n for n in nodes
250 250 if self.phase(repo, repo[n].rev()) >= phase]
251 251 if not nodes:
252 252 break # no roots to move anymore
253 253 olds = self.phaseroots[phase]
254 254 roots = set(ctx.node() for ctx in repo.set(
255 255 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
256 256 if olds != roots:
257 257 self._updateroots(phase, roots, tr)
258 258 # some roots may need to be declared for lower phases
259 259 delroots.extend(olds - roots)
260 260 # declare deleted root in the target phase
261 261 if targetphase != 0:
262 262 self.retractboundary(repo, tr, targetphase, delroots)
263 263 repo.invalidatevolatilesets()
264 264
265 265 def retractboundary(self, repo, tr, targetphase, nodes):
266 266 # Be careful to preserve shallow-copied values: do not update
267 267 # phaseroots values, replace them.
268 268
269 269 repo = repo.unfiltered()
270 270 currentroots = self.phaseroots[targetphase]
271 271 newroots = [n for n in nodes
272 272 if self.phase(repo, repo[n].rev()) < targetphase]
273 273 if newroots:
274 274 if nullid in newroots:
275 275 raise util.Abort(_('cannot change null revision phase'))
276 276 currentroots = currentroots.copy()
277 277 currentroots.update(newroots)
278 278 ctxs = repo.set('roots(%ln::)', currentroots)
279 279 currentroots.intersection_update(ctx.node() for ctx in ctxs)
280 280 self._updateroots(targetphase, currentroots, tr)
281 281 repo.invalidatevolatilesets()
282 282
283 283 def filterunknown(self, repo):
284 284 """remove unknown nodes from the phase boundary
285 285
286 286 Nothing is lost as unknown nodes only hold data for their descendants.
287 287 """
288 288 filtered = False
289 289 nodemap = repo.changelog.nodemap # to filter unknown nodes
290 290 for phase, nodes in enumerate(self.phaseroots):
291 291 missing = sorted(node for node in nodes if node not in nodemap)
292 292 if missing:
293 293 for mnode in missing:
294 294 repo.ui.debug(
295 295 'removing unknown node %s from %i-phase boundary\n'
296 296 % (short(mnode), phase))
297 297 nodes.symmetric_difference_update(missing)
298 298 filtered = True
299 299 if filtered:
300 300 self.dirty = True
301 301 # filterunknown is called by repo.destroyed; we may have no changes in
302 302 # roots, but the phaserevs contents are certainly invalid (or at least we
303 303 # have no proper way to check that). Related to issue 3858.
304 304 #
305 305 # The other caller is __init__, which has no _phaserevs initialized
306 306 # anyway. If this changes, we should consider adding a dedicated
307 307 # "destroyed" function to phasecache or a proper cache key mechanism
308 308 # (see the branchmap one).
309 309 self.invalidate()
310 310
311 311 def advanceboundary(repo, tr, targetphase, nodes):
312 312 """Add nodes to a phase, changing other nodes' phases if necessary.
313 313
314 314 This function moves the boundary *forward*: all nodes are
315 315 set to the target phase or kept in a *lower* phase.
316 316
317 317 Simplifies the boundary to contain phase roots only."""
318 318 phcache = repo._phasecache.copy()
319 319 phcache.advanceboundary(repo, tr, targetphase, nodes)
320 320 repo._phasecache.replace(phcache)
321 321
322 322 def retractboundary(repo, tr, targetphase, nodes):
323 323 """Set nodes back to a phase, changing other nodes' phases if
324 324 necessary.
325 325
326 326 This function moves the boundary *backward*: all nodes are
327 327 set to the target phase or kept in a *higher* phase.
328 328
329 329 Simplifies the boundary to contain phase roots only."""
330 330 phcache = repo._phasecache.copy()
331 331 phcache.retractboundary(repo, tr, targetphase, nodes)
332 332 repo._phasecache.replace(phcache)
333 333
334 334 def listphases(repo):
335 335 """List phase roots for serialization over pushkey"""
336 336 keys = {}
337 337 value = '%i' % draft
338 338 for root in repo._phasecache.phaseroots[draft]:
339 339 keys[hex(root)] = value
340 340
341 341 if repo.ui.configbool('phases', 'publish', True):
342 342 # Add extra data to let the remote know we are a publishing
343 343 # repo. Publishing repos can't just pretend they are old repos.
344 344 # When pushing to a publishing repo, the client still needs to
345 345 # push the phase boundary.
346 346 #
347 347 # A push does not only push changesets. It also pushes phase data.
348 348 # New phase data may apply to common changesets which won't be
349 349 # pushed (as they are common). Here is a very simple example:
350 350 #
351 351 # 1) repo A pushes changeset X as draft to repo B
352 352 # 2) repo B makes changeset X public
353 353 # 3) repo B pushes to repo A. X is not pushed, but the data that
354 354 # X is now public should be
355 355 #
356 356 # The server can't handle it on its own as it has no idea of
357 357 # the client's phase data.
358 358 keys['publishing'] = 'True'
359 359 return keys
360 360
361 361 def pushphase(repo, nhex, oldphasestr, newphasestr):
362 362 """Change the phase of a single node as requested over pushkey"""
363 363 repo = repo.unfiltered()
364 364 tr = None
365 365 lock = repo.lock()
366 366 try:
367 367 currentphase = repo[nhex].phase()
368 368 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
369 369 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
370 370 if currentphase == oldphase and newphase < oldphase:
371 371 tr = repo.transaction('pushkey-phase')
372 372 advanceboundary(repo, tr, newphase, [bin(nhex)])
373 373 tr.close()
374 374 return 1
375 375 elif currentphase == newphase:
376 376 # raced, but got correct result
377 377 return 1
378 378 else:
379 379 return 0
380 380 finally:
381 381 if tr:
382 382 tr.release()
383 383 lock.release()
384 384
385 385 def analyzeremotephases(repo, subset, roots):
386 386 """Compute phase heads and roots in a subset of nodes from a root dict
387 387
388 388 * subset is the heads of the subset
389 389 * roots is a {<nodeid> => phase} mapping. Keys and values are strings.
390 390
391 391 Accepts unknown elements as input
392 392 """
393 393 repo = repo.unfiltered()
394 394 # build list from dictionary
395 395 draftroots = []
396 396 nodemap = repo.changelog.nodemap # to filter unknown nodes
397 397 for nhex, phase in roots.iteritems():
398 398 if nhex == 'publishing': # ignore data related to publish option
399 399 continue
400 400 node = bin(nhex)
401 401 phase = int(phase)
402 402 if phase == 0:
403 403 if node != nullid:
404 404 repo.ui.warn(_('ignoring inconsistent public root'
405 405 ' from remote: %s\n') % nhex)
406 406 elif phase == 1:
407 407 if node in nodemap:
408 408 draftroots.append(node)
409 409 else:
410 410 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
411 411 % (phase, nhex))
412 412 # compute heads
413 413 publicheads = newheads(repo, subset, draftroots)
414 414 return publicheads, draftroots
415 415
416 416 def newheads(repo, heads, roots):
417 417 """compute the new heads of a subset minus another
418 418
419 419 * `heads`: defines the first subset
420 420 * `roots`: defines the second, which we subtract from the first"""
421 421 repo = repo.unfiltered()
422 422 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
423 423 heads, roots, roots, heads)
424 424 return [c.node() for c in revset]
425 425
426 426
427 427 def newcommitphase(ui):
428 428 """helper to get the target phase of a new commit
429 429
430 430 Handles all possible values for the phases.new-commit option.
431 431
432 432 """
433 433 v = ui.config('phases', 'new-commit', draft)
434 434 try:
435 435 return phasenames.index(v)
436 436 except ValueError:
437 437 try:
438 438 return int(v)
439 439 except ValueError:
440 440 msg = _("phases.new-commit: not a valid phase name ('%s')")
441 441 raise error.ConfigError(msg % v)
442 442
443 443 def hassecret(repo):
444 444 """utility function that checks if a repo has any secret changeset."""
445 445 return bool(repo._phasecache.phaseroots[2])
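The copy() change above is what makes the bundlerepo override safe: the module-level advanceboundary() and retractboundary() helpers work on a copy of repo._phasecache and then call replace(), so a copy built with a hard-coded phasecache(...) would silently turn a bundlephasecache back into a plain, writable cache. Using self.__class__ preserves the subclass, and with it the overridden write()/_updateroots() behaviour, across that copy/replace cycle. A small sketch of the pattern, using a hypothetical stand-in subclass rather than the real bundlephasecache:

    from mercurial import phases

    class readonlycache(phases.phasecache):
        # hypothetical stand-in for bundlerepo.bundlephasecache
        def write(self):
            raise NotImplementedError

    ph = readonlycache(None, None, _load=False)
    ph.phaseroots = [set(), set(), set()]   # public, draft, secret roots
    ph.dirty = False
    ph.opener = None
    ph._phaserevs = None

    clone = ph.copy()
    # With the old copy(), clone would be a plain phasecache whose write()
    # persists phaseroots to disk; with self.__class__ the subclass survives.
    assert type(clone) is readonlycache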