@@ -1,1065 +1,1055 @@
|
1 | 1 | # changegroup.py - Mercurial changegroup manipulation functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import os |
|
11 | 11 | import struct |
|
12 | 12 | import tempfile |
|
13 | 13 | import weakref |
|
14 | 14 | |
|
15 | 15 | from .i18n import _ |
|
16 | 16 | from .node import ( |
|
17 | 17 | hex, |
|
18 | 18 | nullid, |
|
19 | 19 | nullrev, |
|
20 | 20 | short, |
|
21 | 21 | ) |
|
22 | 22 | |
|
23 | 23 | from . import ( |
|
24 | 24 | branchmap, |
|
25 | 25 | dagutil, |
|
26 | 26 | discovery, |
|
27 | 27 | error, |
|
28 | 28 | mdiff, |
|
29 | 29 | phases, |
|
30 | 30 | util, |
|
31 | 31 | ) |
|
32 | 32 | |
|
33 | 33 | _CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s" |
|
34 | 34 | _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s" |
|
35 | 35 | _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH" |
|
36 | 36 | |
|
37 | 37 | def readexactly(stream, n): |
|
38 | 38 | '''read n bytes from stream.read and abort if less was available''' |
|
39 | 39 | s = stream.read(n) |
|
40 | 40 | if len(s) < n: |
|
41 | 41 | raise error.Abort(_("stream ended unexpectedly" |
|
42 | 42 | " (got %d bytes, expected %d)") |
|
43 | 43 | % (len(s), n)) |
|
44 | 44 | return s |
|
45 | 45 | |
|
46 | 46 | def getchunk(stream): |
|
47 | 47 | """return the next chunk from stream as a string""" |
|
48 | 48 | d = readexactly(stream, 4) |
|
49 | 49 | l = struct.unpack(">l", d)[0] |
|
50 | 50 | if l <= 4: |
|
51 | 51 | if l: |
|
52 | 52 | raise error.Abort(_("invalid chunk length %d") % l) |
|
53 | 53 | return "" |
|
54 | 54 | return readexactly(stream, l - 4) |
|
55 | 55 | |
|
56 | 56 | def chunkheader(length): |
|
57 | 57 | """return a changegroup chunk header (string)""" |
|
58 | 58 | return struct.pack(">l", length + 4) |
|
59 | 59 | |
|
60 | 60 | def closechunk(): |
|
61 | 61 | """return a changegroup chunk header (string) for a zero-length chunk""" |
|
62 | 62 | return struct.pack(">l", 0) |
|
63 | 63 | |
|
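# Illustrative sketch (not part of the diff): the chunk framing that
# getchunk()/chunkheader()/closechunk() implement. The 4-byte big-endian
# length includes the header itself, so a 3-byte payload is framed with
# length 7, and a bare zero length ends a chunk group:
#
#   import struct
#   from io import BytesIO
#   stream = BytesIO(struct.pack(">l", 7) + 'abc' + struct.pack(">l", 0))
#   getchunk(stream)   # -> 'abc'
#   getchunk(stream)   # -> ''  (end-of-group marker)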
64 | 64 | def combineresults(results): |
|
65 | 65 | """logic to combine 0 or more addchangegroup results into one""" |
|
66 | 66 | changedheads = 0 |
|
67 | 67 | result = 1 |
|
68 | 68 | for ret in results: |
|
69 | 69 | # If any changegroup result is 0, return 0 |
|
70 | 70 | if ret == 0: |
|
71 | 71 | result = 0 |
|
72 | 72 | break |
|
73 | 73 | if ret < -1: |
|
74 | 74 | changedheads += ret + 1 |
|
75 | 75 | elif ret > 1: |
|
76 | 76 | changedheads += ret - 1 |
|
77 | 77 | if changedheads > 0: |
|
78 | 78 | result = 1 + changedheads |
|
79 | 79 | elif changedheads < 0: |
|
80 | 80 | result = -1 + changedheads |
|
81 | 81 | return result |
|
82 | 82 | |
|
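# Worked example (sketch) of the head-delta encoding combineresults()
# consumes -- per apply() below, a result of 1+n means n heads were added
# and -1-n means n heads were removed:
#
#   combineresults([3, -2])  # == 2: +2 heads then -1 head, net +1
#   combineresults([0, 5])   # == 0: any failed application wins
#   combineresults([])       # == 1: nothing changed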
83 | 83 | def writechunks(ui, chunks, filename, vfs=None): |
|
84 | 84 | """Write chunks to a file and return its filename. |
|
85 | 85 | |
|
86 | 86 | The stream is assumed to be a bundle file. |
|
87 | 87 | Existing files will not be overwritten. |
|
88 | 88 | If no filename is specified, a temporary file is created. |
|
89 | 89 | """ |
|
90 | 90 | fh = None |
|
91 | 91 | cleanup = None |
|
92 | 92 | try: |
|
93 | 93 | if filename: |
|
94 | 94 | if vfs: |
|
95 | 95 | fh = vfs.open(filename, "wb") |
|
96 | 96 | else: |
|
97 | 97 | fh = open(filename, "wb") |
|
98 | 98 | else: |
|
99 | 99 | fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") |
|
100 | 100 | fh = os.fdopen(fd, "wb") |
|
101 | 101 | cleanup = filename |
|
102 | 102 | for c in chunks: |
|
103 | 103 | fh.write(c) |
|
104 | 104 | cleanup = None |
|
105 | 105 | return filename |
|
106 | 106 | finally: |
|
107 | 107 | if fh is not None: |
|
108 | 108 | fh.close() |
|
109 | 109 | if cleanup is not None: |
|
110 | 110 | if filename and vfs: |
|
111 | 111 | vfs.unlink(cleanup) |
|
112 | 112 | else: |
|
113 | 113 | os.unlink(cleanup) |
|
114 | 114 | |
|
115 | 115 | class cg1unpacker(object): |
|
116 | 116 | """Unpacker for cg1 changegroup streams. |
|
117 | 117 | |
|
118 | 118 | A changegroup unpacker handles the framing of the revision data in |
|
119 | 119 | the wire format. Most consumers will want to use the apply() |
|
120 | 120 | method to add the changes from the changegroup to a repository. |
|
121 | 121 | |
|
122 | 122 | If you're forwarding a changegroup unmodified to another consumer, |
|
123 | 123 | use getchunks(), which returns an iterator of changegroup |
|
124 | 124 | chunks. This is mostly useful for cases where you need to know the |
|
125 | 125 | data stream has ended by observing the end of the changegroup. |
|
126 | 126 | |
|
127 | 127 | deltachunk() is useful only if you're applying delta data. Most |
|
128 | 128 | consumers should prefer apply() instead. |
|
129 | 129 | |
|
130 | 130 | A few other public methods exist. Those are used only for |
|
131 | 131 | bundlerepo and some debug commands - their use is discouraged. |
|
132 | 132 | """ |
|
133 | 133 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
134 | 134 | deltaheadersize = struct.calcsize(deltaheader) |
|
135 | 135 | version = '01' |
|
136 | 136 | _grouplistcount = 1 # One list of files after the manifests |
|
137 | 137 | |
|
138 | 138 | def __init__(self, fh, alg, extras=None): |
|
139 | 139 | if alg == 'UN': |
|
140 | 140 | alg = None # get more modern without breaking too much |
|
141 | 141 | if alg not in util.decompressors: |
|
142 | 142 | raise error.Abort(_('unknown stream compression type: %s') |
|
143 | 143 | % alg) |
|
144 | 144 | if alg == 'BZ': |
|
145 | 145 | alg = '_truncatedBZ' |
|
146 | 146 | self._stream = util.decompressors[alg](fh) |
|
147 | 147 | self._type = alg |
|
148 | 148 | self.extras = extras or {} |
|
149 | 149 | self.callback = None |
|
150 | 150 | |
|
151 | 151 | # These methods (compressed, read, seek, tell) all appear to only |
|
152 | 152 | # be used by bundlerepo, but it's a little hard to tell. |
|
153 | 153 | def compressed(self): |
|
154 | 154 | return self._type is not None |
|
155 | 155 | def read(self, l): |
|
156 | 156 | return self._stream.read(l) |
|
157 | 157 | def seek(self, pos): |
|
158 | 158 | return self._stream.seek(pos) |
|
159 | 159 | def tell(self): |
|
160 | 160 | return self._stream.tell() |
|
161 | 161 | def close(self): |
|
162 | 162 | return self._stream.close() |
|
163 | 163 | |
|
164 | 164 | def _chunklength(self): |
|
165 | 165 | d = readexactly(self._stream, 4) |
|
166 | 166 | l = struct.unpack(">l", d)[0] |
|
167 | 167 | if l <= 4: |
|
168 | 168 | if l: |
|
169 | 169 | raise error.Abort(_("invalid chunk length %d") % l) |
|
170 | 170 | return 0 |
|
171 | 171 | if self.callback: |
|
172 | 172 | self.callback() |
|
173 | 173 | return l - 4 |
|
174 | 174 | |
|
175 | 175 | def changelogheader(self): |
|
176 | 176 | """v10 does not have a changelog header chunk""" |
|
177 | 177 | return {} |
|
178 | 178 | |
|
179 | 179 | def manifestheader(self): |
|
180 | 180 | """v10 does not have a manifest header chunk""" |
|
181 | 181 | return {} |
|
182 | 182 | |
|
183 | 183 | def filelogheader(self): |
|
184 | 184 | """return the header of the filelogs chunk, v10 only has the filename""" |
|
185 | 185 | l = self._chunklength() |
|
186 | 186 | if not l: |
|
187 | 187 | return {} |
|
188 | 188 | fname = readexactly(self._stream, l) |
|
189 | 189 | return {'filename': fname} |
|
190 | 190 | |
|
191 | 191 | def _deltaheader(self, headertuple, prevnode): |
|
192 | 192 | node, p1, p2, cs = headertuple |
|
193 | 193 | if prevnode is None: |
|
194 | 194 | deltabase = p1 |
|
195 | 195 | else: |
|
196 | 196 | deltabase = prevnode |
|
197 | 197 | flags = 0 |
|
198 | 198 | return node, p1, p2, deltabase, cs, flags |
|
199 | 199 | |
|
200 | 200 | def deltachunk(self, prevnode): |
|
201 | 201 | l = self._chunklength() |
|
202 | 202 | if not l: |
|
203 | 203 | return {} |
|
204 | 204 | headerdata = readexactly(self._stream, self.deltaheadersize) |
|
205 | 205 | header = struct.unpack(self.deltaheader, headerdata) |
|
206 | 206 | delta = readexactly(self._stream, l - self.deltaheadersize) |
|
207 | 207 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) |
|
208 | 208 | return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs, |
|
209 | 209 | 'deltabase': deltabase, 'delta': delta, 'flags': flags} |
|
210 | 210 | |
|
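    # Sketch (not part of the diff): a cg1 delta chunk is the fixed
    # 80-byte header unpacked above, followed by the delta payload. The
    # delta base is never transmitted; it is implied as the previous
    # chunk's node (or p1 for the first chunk), which is exactly what
    # _deltaheader() reconstructs:
    #
    #   header = struct.pack("20s20s20s20s", node, p1, p2, cs)
    #   chunk = chunkheader(len(header) + len(delta)) + header + delta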
211 | 211 | def getchunks(self): |
|
212 | 212 | """returns all the chunks contained in the bundle |
|
213 | 213 | |
|
214 | 214 | Used when you need to forward the binary stream to a file or another |
|
215 | 215 | network API. To do so, it parses the changegroup data; otherwise it would |
|
216 | 216 | block on an sshrepo because it doesn't know where the stream ends. |
|
217 | 217 | """ |
|
218 | 218 | # an empty chunkgroup is the end of the changegroup |
|
219 | 219 | # a changegroup has at least 2 chunkgroups (changelog and manifest). |
|
220 | 220 | # after that, changegroup versions 1 and 2 have a series of groups |
|
221 | 221 | # with one group per file. changegroup 3 has a series of directory |
|
222 | 222 | # manifests before the files. |
|
223 | 223 | count = 0 |
|
224 | 224 | emptycount = 0 |
|
225 | 225 | while emptycount < self._grouplistcount: |
|
226 | 226 | empty = True |
|
227 | 227 | count += 1 |
|
228 | 228 | while True: |
|
229 | 229 | chunk = getchunk(self) |
|
230 | 230 | if not chunk: |
|
231 | 231 | if empty and count > 2: |
|
232 | 232 | emptycount += 1 |
|
233 | 233 | break |
|
234 | 234 | empty = False |
|
235 | 235 | yield chunkheader(len(chunk)) |
|
236 | 236 | pos = 0 |
|
237 | 237 | while pos < len(chunk): |
|
238 | 238 | next = pos + 2**20 |
|
239 | 239 | yield chunk[pos:next] |
|
240 | 240 | pos = next |
|
241 | 241 | yield closechunk() |
|
242 | 242 | |
|
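    # Sketch: forwarding a changegroup verbatim with getchunks(), as the
    # class docstring suggests -- e.g. dumping an incoming stream into a
    # bundle file via writechunks() defined above:
    #
    #   cg = getunbundler('01', fh, 'UN')
    #   writechunks(ui, cg.getchunks(), 'dump.hg')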
243 | 243 | def _unpackmanifests(self, repo, revmap, trp, prog, numchanges): |
|
244 | 244 | # We know that we'll never have more manifests than we had |
|
245 | 245 | # changesets. |
|
246 | 246 | self.callback = prog(_('manifests'), numchanges) |
|
247 | 247 | # no need to check for empty manifest group here: |
|
248 | 248 | # if the result of the merge of 1 and 2 is the same in 3 and 4, |
|
249 | 249 | # no new manifest will be created and the manifest group will |
|
250 | 250 | # be empty during the pull |
|
251 | 251 | self.manifestheader() |
|
252 | 252 | repo.manifest.addgroup(self, revmap, trp) |
|
253 | 253 | repo.ui.progress(_('manifests'), None) |
|
254 | 254 | self.callback = None |
|
255 | 255 | |
|
256 | 256 | def apply(self, repo, srctype, url, emptyok=False, |
|
257 | 257 | targetphase=phases.draft, expectedtotal=None): |
|
258 | 258 | """Add the changegroup returned by source.read() to this repo. |
|
259 | 259 | srctype is a string like 'push', 'pull', or 'unbundle'. url is |
|
260 | 260 | the URL of the repo where this changegroup is coming from. |
|
261 | 261 | |
|
262 | 262 | Return an integer summarizing the change to this repo: |
|
263 | 263 | - nothing changed or no source: 0 |
|
264 | 264 | - more heads than before: 1+added heads (2..n) |
|
265 | 265 | - fewer heads than before: -1-removed heads (-2..-n) |
|
266 | 266 | - number of heads stays the same: 1 |
|
267 | 267 | """ |
|
268 | 268 | repo = repo.unfiltered() |
|
269 | 269 | def csmap(x): |
|
270 | 270 | repo.ui.debug("add changeset %s\n" % short(x)) |
|
271 | 271 | return len(cl) |
|
272 | 272 | |
|
273 | 273 | def revmap(x): |
|
274 | 274 | return cl.rev(x) |
|
275 | 275 | |
|
276 | 276 | changesets = files = revisions = 0 |
|
277 | 277 | |
|
278 | 278 | try: |
|
279 | 279 | with repo.transaction("\n".join([srctype, |
|
280 | 280 | util.hidepassword(url)])) as tr: |
|
281 | 281 | # The transaction could have been created before and already |
|
282 | 282 | # carries source information. In this case we use the top |
|
283 | 283 | # level data. We overwrite the argument because we need to use |
|
284 | 284 | # the top level values (if they exist) in this function. |
|
285 | 285 | srctype = tr.hookargs.setdefault('source', srctype) |
|
286 | 286 | url = tr.hookargs.setdefault('url', url) |
|
287 | 287 | repo.hook('prechangegroup', throw=True, **tr.hookargs) |
|
288 | 288 | |
|
289 | 289 | # write changelog data to temp files so concurrent readers |
|
290 | 290 | # will not see an inconsistent view |
|
291 | 291 | cl = repo.changelog |
|
292 | 292 | cl.delayupdate(tr) |
|
293 | 293 | oldheads = cl.heads() |
|
294 | 294 | |
|
295 | 295 | trp = weakref.proxy(tr) |
|
296 | 296 | # pull off the changeset group |
|
297 | 297 | repo.ui.status(_("adding changesets\n")) |
|
298 | 298 | clstart = len(cl) |
|
299 | 299 | class prog(object): |
|
300 | 300 | def __init__(self, step, total): |
|
301 | 301 | self._step = step |
|
302 | 302 | self._total = total |
|
303 | 303 | self._count = 1 |
|
304 | 304 | def __call__(self): |
|
305 | 305 | repo.ui.progress(self._step, self._count, |
|
306 | 306 | unit=_('chunks'), total=self._total) |
|
307 | 307 | self._count += 1 |
|
308 | 308 | self.callback = prog(_('changesets'), expectedtotal) |
|
309 | 309 | |
|
310 | 310 | efiles = set() |
|
311 | 311 | def onchangelog(cl, node): |
|
312 | 312 | efiles.update(cl.readfiles(node)) |
|
313 | 313 | |
|
314 | 314 | self.changelogheader() |
|
315 | 315 | srccontent = cl.addgroup(self, csmap, trp, |
|
316 | 316 | addrevisioncb=onchangelog) |
|
317 | 317 | efiles = len(efiles) |
|
318 | 318 | |
|
319 | 319 | if not (srccontent or emptyok): |
|
320 | 320 | raise error.Abort(_("received changelog group is empty")) |
|
321 | 321 | clend = len(cl) |
|
322 | 322 | changesets = clend - clstart |
|
323 | 323 | repo.ui.progress(_('changesets'), None) |
|
324 | 324 | self.callback = None |
|
325 | 325 | |
|
326 | 326 | # pull off the manifest group |
|
327 | 327 | repo.ui.status(_("adding manifests\n")) |
|
328 | 328 | self._unpackmanifests(repo, revmap, trp, prog, changesets) |
|
329 | 329 | |
|
330 | 330 | needfiles = {} |
|
331 | 331 | if repo.ui.configbool('server', 'validate', default=False): |
|
332 | 332 | # validate incoming csets have their manifests |
|
333 | 333 | for cset in xrange(clstart, clend): |
|
334 | 334 | mfnode = repo.changelog.read( |
|
335 | 335 | repo.changelog.node(cset))[0] |
|
336 | 336 | mfest = repo.manifest.readdelta(mfnode) |
|
337 | 337 | # store file nodes we must see |
|
338 | 338 | for f, n in mfest.iteritems(): |
|
339 | 339 | needfiles.setdefault(f, set()).add(n) |
|
340 | 340 | |
|
341 | 341 | # process the files |
|
342 | 342 | repo.ui.status(_("adding file changes\n")) |
|
343 | 343 | newrevs, newfiles = _addchangegroupfiles( |
|
344 | 344 | repo, self, revmap, trp, efiles, needfiles) |
|
345 | 345 | revisions += newrevs |
|
346 | 346 | files += newfiles |
|
347 | 347 | |
|
348 | 348 | dh = 0 |
|
349 | 349 | if oldheads: |
|
350 | 350 | heads = cl.heads() |
|
351 | 351 | dh = len(heads) - len(oldheads) |
|
352 | 352 | for h in heads: |
|
353 | 353 | if h not in oldheads and repo[h].closesbranch(): |
|
354 | 354 | dh -= 1 |
|
355 | 355 | htext = "" |
|
356 | 356 | if dh: |
|
357 | 357 | htext = _(" (%+d heads)") % dh |
|
358 | 358 | |
|
359 | 359 | repo.ui.status(_("added %d changesets" |
|
360 | 360 | " with %d changes to %d files%s\n") |
|
361 | 361 | % (changesets, revisions, files, htext)) |
|
362 | 362 | repo.invalidatevolatilesets() |
|
363 | 363 | |
|
364 | 364 | if changesets > 0: |
|
365 | 365 | if 'node' not in tr.hookargs: |
|
366 | 366 | tr.hookargs['node'] = hex(cl.node(clstart)) |
|
367 | 367 | tr.hookargs['node_last'] = hex(cl.node(clend - 1)) |
|
368 | 368 | hookargs = dict(tr.hookargs) |
|
369 | 369 | else: |
|
370 | 370 | hookargs = dict(tr.hookargs) |
|
371 | 371 | hookargs['node'] = hex(cl.node(clstart)) |
|
372 | 372 | hookargs['node_last'] = hex(cl.node(clend - 1)) |
|
373 | 373 | repo.hook('pretxnchangegroup', throw=True, **hookargs) |
|
374 | 374 | |
|
375 | 375 | added = [cl.node(r) for r in xrange(clstart, clend)] |
|
376 | 376 | publishing = repo.publishing() |
|
377 | 377 | if srctype in ('push', 'serve'): |
|
378 | 378 | # Old servers cannot push the boundary themselves. |
|
379 | 379 | # New servers won't push the boundary if the changeset already |
|
380 | 380 | # exists locally as secret |
|
381 | 381 | # |
|
382 | 382 | # We should not use added here but the list of all changes in |
|
383 | 383 | # the bundle |
|
384 | 384 | if publishing: |
|
385 | 385 | phases.advanceboundary(repo, tr, phases.public, |
|
386 | 386 | srccontent) |
|
387 | 387 | else: |
|
388 | 388 | # Those changesets have been pushed from the |
|
389 | 389 | # outside, their phases are going to be pushed |
|
390 | 390 | # alongside. Therefore `targetphase` is |
|
391 | 391 | # ignored. |
|
392 | 392 | phases.advanceboundary(repo, tr, phases.draft, |
|
393 | 393 | srccontent) |
|
394 | 394 | phases.retractboundary(repo, tr, phases.draft, added) |
|
395 | 395 | elif srctype != 'strip': |
|
396 | 396 | # publishing only alters behavior during push |
|
397 | 397 | # |
|
398 | 398 | # strip should not touch boundary at all |
|
399 | 399 | phases.retractboundary(repo, tr, targetphase, added) |
|
400 | 400 | |
|
401 | 401 | if changesets > 0: |
|
402 | 402 | if srctype != 'strip': |
|
403 | 403 | # During strip, the branchcache is invalid but the |
|
404 | 404 | # upcoming call to `destroyed` will repair it. |
|
405 | 405 | # In other cases we can safely update the cache on |
|
406 | 406 | # disk. |
|
407 | 407 | branchmap.updatecache(repo.filtered('served')) |
|
408 | 408 | |
|
409 | 409 | def runhooks(): |
|
410 | 410 | # These hooks run when the lock releases, not when the |
|
411 | 411 | # transaction closes. So it's possible for the changelog |
|
412 | 412 | # to have changed since we last saw it. |
|
413 | 413 | if clstart >= len(repo): |
|
414 | 414 | return |
|
415 | 415 | |
|
416 | 416 | # forcefully update the on-disk branch cache |
|
417 | 417 | repo.ui.debug("updating the branch cache\n") |
|
418 | 418 | repo.hook("changegroup", **hookargs) |
|
419 | 419 | |
|
420 | 420 | for n in added: |
|
421 | 421 | args = hookargs.copy() |
|
422 | 422 | args['node'] = hex(n) |
|
423 | 423 | del args['node_last'] |
|
424 | 424 | repo.hook("incoming", **args) |
|
425 | 425 | |
|
426 | 426 | newheads = [h for h in repo.heads() |
|
427 | 427 | if h not in oldheads] |
|
428 | 428 | repo.ui.log("incoming", |
|
429 | 429 | "%s incoming changes - new heads: %s\n", |
|
430 | 430 | len(added), |
|
431 | 431 | ', '.join([hex(c[:6]) for c in newheads])) |
|
432 | 432 | |
|
433 | 433 | tr.addpostclose('changegroup-runhooks-%020i' % clstart, |
|
434 | 434 | lambda tr: repo._afterlock(runhooks)) |
|
435 | 435 | finally: |
|
436 | 436 | repo.ui.flush() |
|
437 | 437 | # never return 0 here: |
|
438 | 438 | if dh < 0: |
|
439 | 439 | return dh - 1 |
|
440 | 440 | else: |
|
441 | 441 | return dh + 1 |
|
442 | 442 | |
|
443 | 443 | class cg2unpacker(cg1unpacker): |
|
444 | 444 | """Unpacker for cg2 streams. |
|
445 | 445 | |
|
446 | 446 | cg2 streams add support for generaldelta, so the delta header |
|
447 | 447 | format is slightly different. All other features about the data |
|
448 | 448 | remain the same. |
|
449 | 449 | """ |
|
450 | 450 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER |
|
451 | 451 | deltaheadersize = struct.calcsize(deltaheader) |
|
452 | 452 | version = '02' |
|
453 | 453 | |
|
454 | 454 | def _deltaheader(self, headertuple, prevnode): |
|
455 | 455 | node, p1, p2, deltabase, cs = headertuple |
|
456 | 456 | flags = 0 |
|
457 | 457 | return node, p1, p2, deltabase, cs, flags |
|
458 | 458 | |
|
459 | 459 | class cg3unpacker(cg2unpacker): |
|
460 | 460 | """Unpacker for cg3 streams. |
|
461 | 461 | |
|
462 | 462 | cg3 streams add support for exchanging treemanifests and revlog |
|
463 | 463 | flags. It adds the revlog flags to the delta header and an empty chunk |
|
464 | 464 | separating manifests and files. |
|
465 | 465 | """ |
|
466 | 466 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER |
|
467 | 467 | deltaheadersize = struct.calcsize(deltaheader) |
|
468 | 468 | version = '03' |
|
469 | 469 | _grouplistcount = 2 # One list of manifests and one list of files |
|
470 | 470 | |
|
471 | 471 | def _deltaheader(self, headertuple, prevnode): |
|
472 | 472 | node, p1, p2, deltabase, cs, flags = headertuple |
|
473 | 473 | return node, p1, p2, deltabase, cs, flags |
|
474 | 474 | |
|
475 | 475 | def _unpackmanifests(self, repo, revmap, trp, prog, numchanges): |
|
476 | 476 | super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog, |
|
477 | 477 | numchanges) |
|
478 | 478 | while True: |
|
479 | 479 | chunkdata = self.filelogheader() |
|
480 | 480 | if not chunkdata: |
|
481 | 481 | break |
|
482 | 482 | # If we get here, there are directory manifests in the changegroup |
|
483 | 483 | d = chunkdata["filename"] |
|
484 | 484 | repo.ui.debug("adding %s revisions\n" % d) |
|
485 | 485 | dirlog = repo.manifest.dirlog(d) |
|
486 | 486 | if not dirlog.addgroup(self, revmap, trp): |
|
487 | 487 | raise error.Abort(_("received dir revlog group is empty")) |
|
488 | 488 | |
|
489 | 489 | class headerlessfixup(object): |
|
490 | 490 | def __init__(self, fh, h): |
|
491 | 491 | self._h = h |
|
492 | 492 | self._fh = fh |
|
493 | 493 | def read(self, n): |
|
494 | 494 | if self._h: |
|
495 | 495 | d, self._h = self._h[:n], self._h[n:] |
|
496 | 496 | if len(d) < n: |
|
497 | 497 | d += readexactly(self._fh, n - len(d)) |
|
498 | 498 | return d |
|
499 | 499 | return readexactly(self._fh, n) |
|
500 | 500 | |
|
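# Sketch (not part of the diff): headerlessfixup re-prepends bytes that a
# caller already consumed while sniffing the stream's compression header,
# so downstream readers see an intact stream:
#
#   from io import BytesIO
#   fh = BytesIO('HG10UNrest-of-stream')
#   magic = fh.read(6)                  # peeked to detect the format
#   fixed = headerlessfixup(fh, magic)  # push those 6 bytes back
#   fixed.read(6)                       # -> 'HG10UN' again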
501 | 501 | class cg1packer(object): |
|
502 | 502 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
503 | 503 | version = '01' |
|
504 | 504 | def __init__(self, repo, bundlecaps=None): |
|
505 | 505 | """Given a source repo, construct a bundler. |
|
506 | 506 | |
|
507 | 507 | bundlecaps is optional and can be used to specify the set of |
|
508 | 508 | capabilities which can be used to build the bundle. |
|
509 | 509 | """ |
|
510 | 510 | # Set of capabilities we can use to build the bundle. |
|
511 | 511 | if bundlecaps is None: |
|
512 | 512 | bundlecaps = set() |
|
513 | 513 | self._bundlecaps = bundlecaps |
|
514 | 514 | # experimental config: bundle.reorder |
|
515 | 515 | reorder = repo.ui.config('bundle', 'reorder', 'auto') |
|
516 | 516 | if reorder == 'auto': |
|
517 | 517 | reorder = None |
|
518 | 518 | else: |
|
519 | 519 | reorder = util.parsebool(reorder) |
|
520 | 520 | self._repo = repo |
|
521 | 521 | self._reorder = reorder |
|
522 | 522 | self._progress = repo.ui.progress |
|
523 | 523 | if self._repo.ui.verbose and not self._repo.ui.debugflag: |
|
524 | 524 | self._verbosenote = self._repo.ui.note |
|
525 | 525 | else: |
|
526 | 526 | self._verbosenote = lambda s: None |
|
527 | 527 | |
|
528 | 528 | def close(self): |
|
529 | 529 | return closechunk() |
|
530 | 530 | |
|
531 | 531 | def fileheader(self, fname): |
|
532 | 532 | return chunkheader(len(fname)) + fname |
|
533 | 533 | |
|
534 | 534 | # Extracted both for clarity and for overriding in extensions. |
|
535 | 535 | def _sortgroup(self, revlog, nodelist, lookup): |
|
536 | 536 | """Sort nodes for change group and turn them into revnums.""" |
|
537 | 537 | # for generaldelta revlogs, we linearize the revs; this will both be |
|
538 | 538 | # much quicker and generate a much smaller bundle |
|
539 | 539 | if (revlog._generaldelta and self._reorder is None) or self._reorder: |
|
540 | 540 | dag = dagutil.revlogdag(revlog) |
|
541 | 541 | return dag.linearize(set(revlog.rev(n) for n in nodelist)) |
|
542 | 542 | else: |
|
543 | 543 | return sorted([revlog.rev(n) for n in nodelist]) |
|
544 | 544 | |
|
545 | 545 | def group(self, nodelist, revlog, lookup, units=None): |
|
546 | 546 | """Calculate a delta group, yielding a sequence of changegroup chunks |
|
547 | 547 | (strings). |
|
548 | 548 | |
|
549 | 549 | Given a list of changeset revs, return a set of deltas and |
|
550 | 550 | metadata corresponding to nodes. The first delta is |
|
551 | 551 | first parent(nodelist[0]) -> nodelist[0], the receiver is |
|
552 | 552 | guaranteed to have this parent as it has all history before |
|
553 | 553 | these changesets. If the first parent is nullrev, the |
|
554 | 554 | changegroup starts with a full revision. |
|
555 | 555 | |
|
556 | 556 | If units is not None, progress detail will be generated; units specifies |
|
557 | 557 | the type of revlog that is touched (changelog, manifest, etc.). |
|
558 | 558 | """ |
|
559 | 559 | # if we don't have any revisions touched by these changesets, bail |
|
560 | 560 | if len(nodelist) == 0: |
|
561 | 561 | yield self.close() |
|
562 | 562 | return |
|
563 | 563 | |
|
564 | 564 | revs = self._sortgroup(revlog, nodelist, lookup) |
|
565 | 565 | |
|
566 | 566 | # add the parent of the first rev |
|
567 | 567 | p = revlog.parentrevs(revs[0])[0] |
|
568 | 568 | revs.insert(0, p) |
|
569 | 569 | |
|
570 | 570 | # build deltas |
|
571 | 571 | total = len(revs) - 1 |
|
572 | 572 | msgbundling = _('bundling') |
|
573 | 573 | for r in xrange(len(revs) - 1): |
|
574 | 574 | if units is not None: |
|
575 | 575 | self._progress(msgbundling, r + 1, unit=units, total=total) |
|
576 | 576 | prev, curr = revs[r], revs[r + 1] |
|
577 | 577 | linknode = lookup(revlog.node(curr)) |
|
578 | 578 | for c in self.revchunk(revlog, curr, prev, linknode): |
|
579 | 579 | yield c |
|
580 | 580 | |
|
581 | 581 | if units is not None: |
|
582 | 582 | self._progress(msgbundling, None) |
|
583 | 583 | yield self.close() |
|
584 | 584 | |
|
585 | 585 | # filter any nodes that claim to be part of the known set |
|
586 | 586 | def prune(self, revlog, missing, commonrevs): |
|
587 | 587 | rr, rl = revlog.rev, revlog.linkrev |
|
588 | 588 | return [n for n in missing if rl(rr(n)) not in commonrevs] |
|
589 | 589 | |
|
590 | 590 | def _packmanifests(self, dir, mfnodes, lookuplinknode): |
|
591 | 591 | """Pack flat manifests into a changegroup stream.""" |
|
592 | 592 | assert not dir |
|
593 | 593 | for chunk in self.group(mfnodes, self._repo.manifest, |
|
594 | 594 | lookuplinknode, units=_('manifests')): |
|
595 | 595 | yield chunk |
|
596 | 596 | |
|
597 | 597 | def _manifestsdone(self): |
|
598 | 598 | return '' |
|
599 | 599 | |
|
600 | 600 | def generate(self, commonrevs, clnodes, fastpathlinkrev, source): |
|
601 | 601 | '''yield a sequence of changegroup chunks (strings)''' |
|
602 | 602 | repo = self._repo |
|
603 | 603 | cl = repo.changelog |
|
604 | 604 | |
|
605 | 605 | clrevorder = {} |
|
606 | 606 | mfs = {} # needed manifests |
|
607 | 607 | fnodes = {} # needed file nodes |
|
608 | 608 | changedfiles = set() |
|
609 | 609 | |
|
610 | 610 | # Callback for the changelog, used to collect changed files and manifest |
|
611 | 611 | # nodes. |
|
612 | 612 | # Returns the linkrev node (identity in the changelog case). |
|
613 | 613 | def lookupcl(x): |
|
614 | 614 | c = cl.read(x) |
|
615 | 615 | clrevorder[x] = len(clrevorder) |
|
616 | 616 | n = c[0] |
|
617 | 617 | # record the first changeset introducing this manifest version |
|
618 | 618 | mfs.setdefault(n, x) |
|
619 | 619 | # Record a complete list of potentially-changed files in |
|
620 | 620 | # this manifest. |
|
621 | 621 | changedfiles.update(c[3]) |
|
622 | 622 | return x |
|
623 | 623 | |
|
624 | 624 | self._verbosenote(_('uncompressed size of bundle content:\n')) |
|
625 | 625 | size = 0 |
|
626 | 626 | for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')): |
|
627 | 627 | size += len(chunk) |
|
628 | 628 | yield chunk |
|
629 | 629 | self._verbosenote(_('%8.i (changelog)\n') % size) |
|
630 | 630 | |
|
631 | 631 | # We need to make sure that the linkrev in the changegroup refers to |
|
632 | 632 | # the first changeset that introduced the manifest or file revision. |
|
633 | 633 | # The fastpath is usually safer than the slowpath, because the filelogs |
|
634 | 634 | # are walked in revlog order. |
|
635 | 635 | # |
|
636 | 636 | # When taking the slowpath with reorder=None and the manifest revlog |
|
637 | 637 | # uses generaldelta, the manifest may be walked in the "wrong" order. |
|
638 | 638 | # Without 'clrevorder', we would get an incorrect linkrev (see fix in |
|
639 | 639 | # cc0ff93d0c0c). |
|
640 | 640 | # |
|
641 | 641 | # When taking the fastpath, we are only vulnerable to reordering |
|
642 | 642 | # of the changelog itself. The changelog never uses generaldelta, so |
|
643 | 643 | # it is only reordered when reorder=True. To handle this case, we |
|
644 | 644 | # simply take the slowpath, which already has the 'clrevorder' logic. |
|
645 | 645 | # This was also fixed in cc0ff93d0c0c. |
|
646 | 646 | fastpathlinkrev = fastpathlinkrev and not self._reorder |
|
647 | 647 | # Treemanifests don't work correctly with fastpathlinkrev |
|
648 | 648 | # either, because we don't discover which directory nodes to |
|
649 | 649 | # send along with files. This could probably be fixed. |
|
650 | 650 | fastpathlinkrev = fastpathlinkrev and ( |
|
651 | 651 | 'treemanifest' not in repo.requirements) |
|
652 | 652 | |
|
653 | 653 | for chunk in self.generatemanifests(commonrevs, clrevorder, |
|
654 | 654 | fastpathlinkrev, mfs, fnodes): |
|
655 | 655 | yield chunk |
|
656 | 656 | mfs.clear() |
|
657 | 657 | clrevs = set(cl.rev(x) for x in clnodes) |
|
658 | 658 | |
|
659 | 659 | if not fastpathlinkrev: |
|
660 | 660 | def linknodes(unused, fname): |
|
661 | 661 | return fnodes.get(fname, {}) |
|
662 | 662 | else: |
|
663 | 663 | cln = cl.node |
|
664 | 664 | def linknodes(filerevlog, fname): |
|
665 | 665 | llr = filerevlog.linkrev |
|
666 | 666 | fln = filerevlog.node |
|
667 | 667 | revs = ((r, llr(r)) for r in filerevlog) |
|
668 | 668 | return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) |
|
669 | 669 | |
|
670 | 670 | for chunk in self.generatefiles(changedfiles, linknodes, commonrevs, |
|
671 | 671 | source): |
|
672 | 672 | yield chunk |
|
673 | 673 | |
|
674 | 674 | yield self.close() |
|
675 | 675 | |
|
676 | 676 | if clnodes: |
|
677 | 677 | repo.hook('outgoing', node=hex(clnodes[0]), source=source) |
|
678 | 678 | |
|
679 | 679 | def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs, |
|
680 | 680 | fnodes): |
|
681 | 681 | repo = self._repo |
|
682 | 682 | dirlog = repo.manifest.dirlog |
|
683 | 683 | tmfnodes = {'': mfs} |
|
684 | 684 | |
|
685 | 685 | # Callback for the manifest, used to collect linkrevs for filelog |
|
686 | 686 | # revisions. |
|
687 | 687 | # Returns the linkrev node (collected in lookupcl). |
|
688 | 688 | def makelookupmflinknode(dir): |
|
689 | 689 | if fastpathlinkrev: |
|
690 | 690 | assert not dir |
|
691 | 691 | return mfs.__getitem__ |
|
692 | 692 | |
|
693 | 693 | def lookupmflinknode(x): |
|
694 | 694 | """Callback for looking up the linknode for manifests. |
|
695 | 695 | |
|
696 | 696 | Returns the linkrev node for the specified manifest. |
|
697 | 697 | |
|
698 | 698 | SIDE EFFECT: |
|
699 | 699 | |
|
700 | 700 | 1) fclnodes gets populated with the list of relevant |
|
701 | 701 | file nodes if we're not using fastpathlinkrev |
|
702 | 702 | 2) When treemanifests are in use, collects treemanifest nodes |
|
703 | 703 | to send |
|
704 | 704 | |
|
705 | 705 | Note that this means manifests must be completely sent to |
|
706 | 706 | the client before you can trust the list of files and |
|
707 | 707 | treemanifests to send. |
|
708 | 708 | """ |
|
709 | 709 | clnode = tmfnodes[dir][x] |
|
710 | 710 | mdata = dirlog(dir).readshallowfast(x) |
|
711 | 711 | for p, n, fl in mdata.iterentries(): |
|
712 | 712 | if fl == 't': # subdirectory manifest |
|
713 | 713 | subdir = dir + p + '/' |
|
714 | 714 | tmfclnodes = tmfnodes.setdefault(subdir, {}) |
|
715 | 715 | tmfclnode = tmfclnodes.setdefault(n, clnode) |
|
716 | 716 | if clrevorder[clnode] < clrevorder[tmfclnode]: |
|
717 | 717 | tmfclnodes[n] = clnode |
|
718 | 718 | else: |
|
719 | 719 | f = dir + p |
|
720 | 720 | fclnodes = fnodes.setdefault(f, {}) |
|
721 | 721 | fclnode = fclnodes.setdefault(n, clnode) |
|
722 | 722 | if clrevorder[clnode] < clrevorder[fclnode]: |
|
723 | 723 | fclnodes[n] = clnode |
|
724 | 724 | return clnode |
|
725 | 725 | return lookupmflinknode |
|
726 | 726 | |
|
727 | 727 | size = 0 |
|
728 | 728 | while tmfnodes: |
|
729 | 729 | dir = min(tmfnodes) |
|
730 | 730 | nodes = tmfnodes[dir] |
|
731 | 731 | prunednodes = self.prune(dirlog(dir), nodes, commonrevs) |
|
732 | 732 | if not dir or prunednodes: |
|
733 | 733 | for x in self._packmanifests(dir, prunednodes, |
|
734 | 734 | makelookupmflinknode(dir)): |
|
735 | 735 | size += len(x) |
|
736 | 736 | yield x |
|
737 | 737 | del tmfnodes[dir] |
|
738 | 738 | self._verbosenote(_('%8.i (manifests)\n') % size) |
|
739 | 739 | yield self._manifestsdone() |
|
740 | 740 | |
|
741 | 741 | # The 'source' parameter is useful for extensions |
|
742 | 742 | def generatefiles(self, changedfiles, linknodes, commonrevs, source): |
|
743 | 743 | repo = self._repo |
|
744 | 744 | progress = self._progress |
|
745 | 745 | msgbundling = _('bundling') |
|
746 | 746 | |
|
747 | 747 | total = len(changedfiles) |
|
748 | 748 | # for progress output |
|
749 | 749 | msgfiles = _('files') |
|
750 | 750 | for i, fname in enumerate(sorted(changedfiles)): |
|
751 | 751 | filerevlog = repo.file(fname) |
|
752 | 752 | if not filerevlog: |
|
753 | 753 | raise error.Abort(_("empty or missing revlog for %s") % fname) |
|
754 | 754 | |
|
755 | 755 | linkrevnodes = linknodes(filerevlog, fname) |
|
756 | 756 | # Lookup table for filenodes; we collected the linkrev nodes above in the |
|
757 | 757 | # fastpath case and with lookupmf in the slowpath case. |
|
758 | 758 | def lookupfilelog(x): |
|
759 | 759 | return linkrevnodes[x] |
|
760 | 760 | |
|
761 | 761 | filenodes = self.prune(filerevlog, linkrevnodes, commonrevs) |
|
762 | 762 | if filenodes: |
|
763 | 763 | progress(msgbundling, i + 1, item=fname, unit=msgfiles, |
|
764 | 764 | total=total) |
|
765 | 765 | h = self.fileheader(fname) |
|
766 | 766 | size = len(h) |
|
767 | 767 | yield h |
|
768 | 768 | for chunk in self.group(filenodes, filerevlog, lookupfilelog): |
|
769 | 769 | size += len(chunk) |
|
770 | 770 | yield chunk |
|
771 | 771 | self._verbosenote(_('%8.i %s\n') % (size, fname)) |
|
772 | 772 | progress(msgbundling, None) |
|
773 | 773 | |
|
774 | 774 | def deltaparent(self, revlog, rev, p1, p2, prev): |
|
775 | 775 | return prev |
|
776 | 776 | |
|
777 | 777 | def revchunk(self, revlog, rev, prev, linknode): |
|
778 | 778 | node = revlog.node(rev) |
|
779 | 779 | p1, p2 = revlog.parentrevs(rev) |
|
780 | 780 | base = self.deltaparent(revlog, rev, p1, p2, prev) |
|
781 | 781 | |
|
782 | 782 | prefix = '' |
|
783 | 783 | if revlog.iscensored(base) or revlog.iscensored(rev): |
|
784 | 784 | try: |
|
785 | 785 | delta = revlog.revision(node) |
|
786 | 786 | except error.CensoredNodeError as e: |
|
787 | 787 | delta = e.tombstone |
|
788 | 788 | if base == nullrev: |
|
789 | 789 | prefix = mdiff.trivialdiffheader(len(delta)) |
|
790 | 790 | else: |
|
791 | 791 | baselen = revlog.rawsize(base) |
|
792 | 792 | prefix = mdiff.replacediffheader(baselen, len(delta)) |
|
793 | 793 | elif base == nullrev: |
|
794 | 794 | delta = revlog.revision(node) |
|
795 | 795 | prefix = mdiff.trivialdiffheader(len(delta)) |
|
796 | 796 | else: |
|
797 | 797 | delta = revlog.revdiff(base, rev) |
|
798 | 798 | p1n, p2n = revlog.parents(node) |
|
799 | 799 | basenode = revlog.node(base) |
|
800 | 800 | flags = revlog.flags(rev) |
|
801 | 801 | meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags) |
|
802 | 802 | meta += prefix |
|
803 | 803 | l = len(meta) + len(delta) |
|
804 | 804 | yield chunkheader(l) |
|
805 | 805 | yield meta |
|
806 | 806 | yield delta |
|
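    # Note: each revision is emitted as one framed chunk -- a chunkheader()
    # covering the packed delta header, the optional diff-header prefix
    # built above, and the delta body that follow it.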
807 | 807 | def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags): |
|
808 | 808 | # do nothing with basenode, it is implicitly the previous one in HG10 |
|
809 | 809 | # do nothing with flags, it is implicitly 0 for cg1 and cg2 |
|
810 | 810 | return struct.pack(self.deltaheader, node, p1n, p2n, linknode) |
|
811 | 811 | |
|
812 | 812 | class cg2packer(cg1packer): |
|
813 | 813 | version = '02' |
|
814 | 814 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER |
|
815 | 815 | |
|
816 | 816 | def __init__(self, repo, bundlecaps=None): |
|
817 | 817 | super(cg2packer, self).__init__(repo, bundlecaps) |
|
818 | 818 | if self._reorder is None: |
|
819 | 819 | # Since generaldelta is directly supported by cg2, reordering |
|
820 | 820 | # generally doesn't help, so we disable it by default (treating |
|
821 | 821 | # bundle.reorder=auto just like bundle.reorder=False). |
|
822 | 822 | self._reorder = False |
|
823 | 823 | |
|
824 | 824 | def deltaparent(self, revlog, rev, p1, p2, prev): |
|
825 | 825 | dp = revlog.deltaparent(rev) |
|
826 | 826 | # avoid storing full revisions; pick prev in those cases |
|
827 | 827 | # also pick prev when we can't be sure remote has dp |
|
828 | 828 | if dp == nullrev or (dp != p1 and dp != p2 and dp != prev): |
|
829 | 829 | return prev |
|
830 | 830 | return dp |
|
831 | 831 | |
|
832 | 832 | def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags): |
|
833 | 833 | # Do nothing with flags, it is implicitly 0 in cg1 and cg2 |
|
834 | 834 | return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode) |
|
835 | 835 | |
|
836 | 836 | class cg3packer(cg2packer): |
|
837 | 837 | version = '03' |
|
838 | 838 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER |
|
839 | 839 | |
|
840 | 840 | def _packmanifests(self, dir, mfnodes, lookuplinknode): |
|
841 | 841 | if dir: |
|
842 | 842 | yield self.fileheader(dir) |
|
843 | 843 | for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir), |
|
844 | 844 | lookuplinknode, units=_('manifests')): |
|
845 | 845 | yield chunk |
|
846 | 846 | |
|
847 | 847 | def _manifestsdone(self): |
|
848 | 848 | return self.close() |
|
849 | 849 | |
|
850 | 850 | def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags): |
|
851 | 851 | return struct.pack( |
|
852 | 852 | self.deltaheader, node, p1n, p2n, basenode, linknode, flags) |
|
853 | 853 | |
|
854 | 854 | _packermap = {'01': (cg1packer, cg1unpacker), |
|
855 | 855 | # cg2 adds support for exchanging generaldelta |
|
856 | 856 | '02': (cg2packer, cg2unpacker), |
|
857 | 857 | # cg3 adds support for exchanging revlog flags and treemanifests |
|
858 | 858 | '03': (cg3packer, cg3unpacker), |
|
859 | 859 | } |
|
860 | 860 | |
|
861 | 861 | def allsupportedversions(ui): |
|
862 | 862 | versions = set(_packermap.keys()) |
|
863 | 863 | versions.discard('03') |
|
864 | 864 | if (ui.configbool('experimental', 'changegroup3') or |
|
865 | 865 | ui.configbool('experimental', 'treemanifest')): |
|
866 | 866 | versions.add('03') |
|
867 | 867 | return versions |
|
868 | 868 | |
|
869 | 869 | # Changegroup versions that can be applied to the repo |
|
870 | 870 | def supportedincomingversions(repo): |
|
871 | 871 | versions = allsupportedversions(repo.ui) |
|
872 | 872 | if 'treemanifest' in repo.requirements: |
|
873 | 873 | versions.add('03') |
|
874 | 874 | return versions |
|
875 | 875 | |
|
876 | 876 | # Changegroup versions that can be created from the repo |
|
877 | 877 | def supportedoutgoingversions(repo): |
|
878 | 878 | versions = allsupportedversions(repo.ui) |
|
879 | 879 | if 'treemanifest' in repo.requirements: |
|
880 | 880 | # Versions 01 and 02 support only flat manifests and it's just too |
|
881 | 881 | # expensive to convert between the flat manifest and tree manifest on |
|
882 | 882 | # the fly. Since tree manifests are hashed differently, all of history |
|
883 | 883 | # would have to be converted. Instead, we simply don't even pretend to |
|
884 | 884 | # support versions 01 and 02. |
|
885 | 885 | versions.discard('01') |
|
886 | 886 | versions.discard('02') |
|
887 | 887 | versions.add('03') |
|
888 | 888 | return versions |
|
889 | 889 | |
|
890 | 890 | def safeversion(repo): |
|
891 | 891 | # Finds the smallest version that it's safe to assume clients of the repo |
|
892 | 892 | # will support. For example, all hg versions that support generaldelta also |
|
893 | 893 | # support changegroup 02. |
|
894 | 894 | versions = supportedoutgoingversions(repo) |
|
895 | 895 | if 'generaldelta' in repo.requirements: |
|
896 | 896 | versions.discard('01') |
|
897 | 897 | assert versions |
|
898 | 898 | return min(versions) |
|
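# Example (sketch): a repo with 'generaldelta' in its requirements and no
# treemanifests supports {'01', '02'}, discards '01', and min() yields
# '02'; a plain repo still gets '01'.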
899 | 899 | |
|
900 | 900 | def getbundler(version, repo, bundlecaps=None): |
|
901 | 901 | assert version in supportedoutgoingversions(repo) |
|
902 | 902 | return _packermap[version][0](repo, bundlecaps) |
|
903 | 903 | |
|
904 | 904 | def getunbundler(version, fh, alg, extras=None): |
|
905 | 905 | return _packermap[version][1](fh, alg, extras=extras) |
|
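# Sketch: these two factories are the usual entry points into _packermap,
# e.g.:
#
#   bundler = getbundler('02', repo)   # -> a cg2packer instance
#   cg = getunbundler('02', fh, 'UN')  # -> a cg2unpacker over fh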
906 | 906 | |
|
907 | 907 | def _changegroupinfo(repo, nodes, source): |
|
908 | 908 | if repo.ui.verbose or source == 'bundle': |
|
909 | 909 | repo.ui.status(_("%d changesets found\n") % len(nodes)) |
|
910 | 910 | if repo.ui.debugflag: |
|
911 | 911 | repo.ui.debug("list of changesets:\n") |
|
912 | 912 | for node in nodes: |
|
913 | 913 | repo.ui.debug("%s\n" % hex(node)) |
|
914 | 914 | |
|
915 | 915 | def getsubsetraw(repo, outgoing, bundler, source, fastpath=False): |
|
916 | 916 | repo = repo.unfiltered() |
|
917 | 917 | commonrevs = outgoing.common |
|
918 | 918 | csets = outgoing.missing |
|
919 | 919 | heads = outgoing.missingheads |
|
920 | 920 | # We go through the fast path if we get told to, or if all (unfiltered) |
|
921 | 921 | # heads have been requested (since we then know that all linkrevs will |
|
922 | 922 | # be pulled by the client). |
|
923 | 923 | heads.sort() |
|
924 | 924 | fastpathlinkrev = fastpath or ( |
|
925 | 925 | repo.filtername is None and heads == sorted(repo.heads())) |
|
926 | 926 | |
|
927 | 927 | repo.hook('preoutgoing', throw=True, source=source) |
|
928 | 928 | _changegroupinfo(repo, csets, source) |
|
929 | 929 | return bundler.generate(commonrevs, csets, fastpathlinkrev, source) |
|
930 | 930 | |
|
931 | 931 | def getsubset(repo, outgoing, bundler, source, fastpath=False): |
|
932 | 932 | gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath) |
|
933 | 933 | return getunbundler(bundler.version, util.chunkbuffer(gengroup), None, |
|
934 | 934 | {'clcount': len(outgoing.missing)}) |
|
935 | 935 | |
|
936 | 936 | def changegroupsubset(repo, roots, heads, source, version='01'): |
|
937 | 937 | """Compute a changegroup consisting of all the nodes that are |
|
938 | 938 | descendants of any of the roots and ancestors of any of the heads. |
|
939 | 939 | Return a chunkbuffer object whose read() method will return |
|
940 | 940 | successive changegroup chunks. |
|
941 | 941 | |
|
942 | 942 | It is fairly complex as determining which filenodes and which |
|
943 | 943 | manifest nodes need to be included for the changeset to be complete |
|
944 | 944 | is non-trivial. |
|
945 | 945 | |
|
946 | 946 | Another wrinkle is doing the reverse, figuring out which changeset in |
|
947 | 947 | the changegroup a particular filenode or manifestnode belongs to. |
|
948 | 948 | """ |
|
949 | cl = repo.changelog | |
|
950 | if not roots: | |
|
951 | roots = [nullid] | |
|
952 | discbases = [] | |
|
953 | for n in roots: | |
|
954 | discbases.extend([p for p in cl.parents(n) if p != nullid]) | |
|
955 | # TODO: remove call to nodesbetween. | |
|
956 | csets, roots, heads = cl.nodesbetween(roots, heads) | |
|
957 | included = set(csets) | |
|
958 | discbases = [n for n in discbases if n not in included] | |
|
959 | outgoing = discovery.outgoing(cl, discbases, heads) | |
|
949 | outgoing = discovery.outgoingbetween(repo, roots, heads) | |
|
960 | 950 | bundler = getbundler(version, repo) |
|
961 | 951 | return getsubset(repo, outgoing, bundler, source) |
|
962 | 952 | |
|
963 | 953 | def getlocalchangegroupraw(repo, source, outgoing, bundlecaps=None, |
|
964 | 954 | version='01'): |
|
965 | 955 | """Like getbundle, but taking a discovery.outgoing as an argument. |
|
966 | 956 | |
|
967 | 957 | This is only implemented for local repos and reuses potentially |
|
968 | 958 | precomputed sets in outgoing. Returns a raw changegroup generator.""" |
|
969 | 959 | if not outgoing.missing: |
|
970 | 960 | return None |
|
971 | 961 | bundler = getbundler(version, repo, bundlecaps) |
|
972 | 962 | return getsubsetraw(repo, outgoing, bundler, source) |
|
973 | 963 | |
|
974 | 964 | def getlocalchangegroup(repo, source, outgoing, bundlecaps=None, |
|
975 | 965 | version='01'): |
|
976 | 966 | """Like getbundle, but taking a discovery.outgoing as an argument. |
|
977 | 967 | |
|
978 | 968 | This is only implemented for local repos and reuses potentially |
|
979 | 969 | precomputed sets in outgoing.""" |
|
980 | 970 | if not outgoing.missing: |
|
981 | 971 | return None |
|
982 | 972 | bundler = getbundler(version, repo, bundlecaps) |
|
983 | 973 | return getsubset(repo, outgoing, bundler, source) |
|
984 | 974 | |
|
985 | 975 | def computeoutgoing(repo, heads, common): |
|
986 | 976 | """Computes which revs are outgoing given a set of common |
|
987 | 977 | and a set of heads. |
|
988 | 978 | |
|
989 | 979 | This is a separate function so extensions can have access to |
|
990 | 980 | the logic. |
|
991 | 981 | |
|
992 | 982 | Returns a discovery.outgoing object. |
|
993 | 983 | """ |
|
994 | 984 | cl = repo.changelog |
|
995 | 985 | if common: |
|
996 | 986 | hasnode = cl.hasnode |
|
997 | 987 | common = [n for n in common if hasnode(n)] |
|
998 | 988 | else: |
|
999 | 989 | common = [nullid] |
|
1000 | 990 | if not heads: |
|
1001 | 991 | heads = cl.heads() |
|
1002 | 992 | return discovery.outgoing(cl, common, heads) |
|
1003 | 993 | |
|
1004 | 994 | def getchangegroup(repo, source, heads=None, common=None, bundlecaps=None, |
|
1005 | 995 | version='01'): |
|
1006 | 996 | """Like changegroupsubset, but returns the set difference between the |
|
1007 | 997 | ancestors of heads and the ancestors of common. |
|
1008 | 998 | |
|
1009 | 999 | If heads is None, use the local heads. If common is None, use [nullid]. |
|
1010 | 1000 | |
|
1011 | 1001 | The nodes in common might not all be known locally due to the way the |
|
1012 | 1002 | current discovery protocol works. |
|
1013 | 1003 | """ |
|
1014 | 1004 | outgoing = computeoutgoing(repo, heads, common) |
|
1015 | 1005 | return getlocalchangegroup(repo, source, outgoing, bundlecaps=bundlecaps, |
|
1016 | 1006 | version=version) |
|
1017 | 1007 | |
|
1018 | 1008 | def changegroup(repo, basenodes, source): |
|
1019 | 1009 | # to avoid a race we use changegroupsubset() (issue1320) |
|
1020 | 1010 | return changegroupsubset(repo, basenodes, repo.heads(), source) |
|
1021 | 1011 | |
|
1022 | 1012 | def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles): |
|
1023 | 1013 | revisions = 0 |
|
1024 | 1014 | files = 0 |
|
1025 | 1015 | while True: |
|
1026 | 1016 | chunkdata = source.filelogheader() |
|
1027 | 1017 | if not chunkdata: |
|
1028 | 1018 | break |
|
1029 | 1019 | files += 1 |
|
1030 | 1020 | f = chunkdata["filename"] |
|
1031 | 1021 | repo.ui.debug("adding %s revisions\n" % f) |
|
1032 | 1022 | repo.ui.progress(_('files'), files, unit=_('files'), |
|
1033 | 1023 | total=expectedfiles) |
|
1034 | 1024 | fl = repo.file(f) |
|
1035 | 1025 | o = len(fl) |
|
1036 | 1026 | try: |
|
1037 | 1027 | if not fl.addgroup(source, revmap, trp): |
|
1038 | 1028 | raise error.Abort(_("received file revlog group is empty")) |
|
1039 | 1029 | except error.CensoredBaseError as e: |
|
1040 | 1030 | raise error.Abort(_("received delta base is censored: %s") % e) |
|
1041 | 1031 | revisions += len(fl) - o |
|
1042 | 1032 | if f in needfiles: |
|
1043 | 1033 | needs = needfiles[f] |
|
1044 | 1034 | for new in xrange(o, len(fl)): |
|
1045 | 1035 | n = fl.node(new) |
|
1046 | 1036 | if n in needs: |
|
1047 | 1037 | needs.remove(n) |
|
1048 | 1038 | else: |
|
1049 | 1039 | raise error.Abort( |
|
1050 | 1040 | _("received spurious file revlog entry")) |
|
1051 | 1041 | if not needs: |
|
1052 | 1042 | del needfiles[f] |
|
1053 | 1043 | repo.ui.progress(_('files'), None) |
|
1054 | 1044 | |
|
1055 | 1045 | for f, needs in needfiles.iteritems(): |
|
1056 | 1046 | fl = repo.file(f) |
|
1057 | 1047 | for n in needs: |
|
1058 | 1048 | try: |
|
1059 | 1049 | fl.rev(n) |
|
1060 | 1050 | except error.LookupError: |
|
1061 | 1051 | raise error.Abort( |
|
1062 | 1052 | _('missing file data for %s:%s - run hg verify') % |
|
1063 | 1053 | (f, hex(n))) |
|
1064 | 1054 | |
|
1065 | 1055 | return revisions, files |
@@ -1,417 +1,438 @@
|
1 | 1 | # discovery.py - protocol changeset discovery functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2010 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | from .i18n import _ |
|
11 | 11 | from .node import ( |
|
12 | 12 | nullid, |
|
13 | 13 | short, |
|
14 | 14 | ) |
|
15 | 15 | |
|
16 | 16 | from . import ( |
|
17 | 17 | bookmarks, |
|
18 | 18 | branchmap, |
|
19 | 19 | error, |
|
20 | 20 | obsolete, |
|
21 | 21 | phases, |
|
22 | 22 | setdiscovery, |
|
23 | 23 | treediscovery, |
|
24 | 24 | util, |
|
25 | 25 | ) |
|
26 | 26 | |
|
27 | 27 | def findcommonincoming(repo, remote, heads=None, force=False): |
|
28 | 28 | """Return a tuple (common, anyincoming, heads) used to identify the common |
|
29 | 29 | subset of nodes between repo and remote. |
|
30 | 30 | |
|
31 | 31 | "common" is a list of (at least) the heads of the common subset. |
|
32 | 32 | "anyincoming" is testable as a boolean indicating if any nodes are missing |
|
33 | 33 | locally. If remote does not support getbundle, this actually is a list of |
|
34 | 34 | roots of the nodes that would be incoming, to be supplied to |
|
35 | 35 | changegroupsubset. No code except for pull should be relying on this fact |
|
36 | 36 | any longer. |
|
37 | 37 | "heads" is either the supplied heads, or else the remote's heads. |
|
38 | 38 | |
|
39 | 39 | If you pass heads and they are all known locally, the response lists just |
|
40 | 40 | these heads in "common" and in "heads". |
|
41 | 41 | |
|
42 | 42 | Please use findcommonoutgoing to compute the set of outgoing nodes to give |
|
43 | 43 | extensions a good hook into outgoing. |
|
44 | 44 | """ |
|
45 | 45 | |
|
46 | 46 | if not remote.capable('getbundle'): |
|
47 | 47 | return treediscovery.findcommonincoming(repo, remote, heads, force) |
|
48 | 48 | |
|
49 | 49 | if heads: |
|
50 | 50 | allknown = True |
|
51 | 51 | knownnode = repo.changelog.hasnode # no nodemap until it is filtered |
|
52 | 52 | for h in heads: |
|
53 | 53 | if not knownnode(h): |
|
54 | 54 | allknown = False |
|
55 | 55 | break |
|
56 | 56 | if allknown: |
|
57 | 57 | return (heads, False, heads) |
|
58 | 58 | |
|
59 | 59 | res = setdiscovery.findcommonheads(repo.ui, repo, remote, |
|
60 | 60 | abortwhenunrelated=not force) |
|
61 | 61 | common, anyinc, srvheads = res |
|
62 | 62 | return (list(common), anyinc, heads or list(srvheads)) |
|
63 | 63 | |
|
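# Sketch: typical use during pull --
#
#   common, anyinc, heads = findcommonincoming(repo, remote)
#   if not anyinc:
#       pass  # nothing is missing locally, so there is nothing to pull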
64 | 64 | class outgoing(object): |
|
65 | 65 | '''Represents the set of nodes present in a local repo but not in a |
|
66 | 66 | (possibly) remote one. |
|
67 | 67 | |
|
68 | 68 | Members: |
|
69 | 69 | |
|
70 | 70 | missing is a list of all nodes present in local but not in remote. |
|
71 | 71 | common is a list of all nodes shared between the two repos. |
|
72 | 72 | excluded is the list of missing changesets that shouldn't be sent remotely. |
|
73 | 73 | missingheads is the list of heads of missing. |
|
74 | 74 | commonheads is the list of heads of common. |
|
75 | 75 | |
|
76 | 76 | The sets are computed on demand from the heads, unless provided upfront |
|
77 | 77 | by discovery.''' |
|
78 | 78 | |
|
79 | 79 | def __init__(self, revlog, commonheads, missingheads): |
|
80 | 80 | self.commonheads = commonheads |
|
81 | 81 | self.missingheads = missingheads |
|
82 | 82 | self._revlog = revlog |
|
83 | 83 | self._common = None |
|
84 | 84 | self._missing = None |
|
85 | 85 | self.excluded = [] |
|
86 | 86 | |
|
87 | 87 | def _computecommonmissing(self): |
|
88 | 88 | sets = self._revlog.findcommonmissing(self.commonheads, |
|
89 | 89 | self.missingheads) |
|
90 | 90 | self._common, self._missing = sets |
|
91 | 91 | |
|
92 | 92 | @util.propertycache |
|
93 | 93 | def common(self): |
|
94 | 94 | if self._common is None: |
|
95 | 95 | self._computecommonmissing() |
|
96 | 96 | return self._common |
|
97 | 97 | |
|
98 | 98 | @util.propertycache |
|
99 | 99 | def missing(self): |
|
100 | 100 | if self._missing is None: |
|
101 | 101 | self._computecommonmissing() |
|
102 | 102 | return self._missing |
|
103 | 103 | |
|
104 | def outgoingbetween(repo, roots, heads): | |
|
105 | """create an ``outgoing`` consisting of nodes between roots and heads | |
|
106 | ||
|
107 | The ``missing`` nodes will be descendants of any of the ``roots`` and | |
|
108 | ancestors of any of the ``heads``, both of which are defined as lists | |
|
109 | of binary nodes. | |
|
110 | """ | |
|
111 | cl = repo.changelog | |
|
112 | if not roots: | |
|
113 | roots = [nullid] | |
|
114 | discbases = [] | |
|
115 | for n in roots: | |
|
116 | discbases.extend([p for p in cl.parents(n) if p != nullid]) | |
|
117 | # TODO remove call to nodesbetween. | |
|
118 | # TODO populate attributes on outgoing instance instead of setting | |
|
119 | # discbases. | |
|
120 | csets, roots, heads = cl.nodesbetween(roots, heads) | |
|
121 | included = set(csets) | |
|
122 | discbases = [n for n in discbases if n not in included] | |
|
123 | return outgoing(cl, discbases, heads) | |
|
124 | ||
|
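# Sketch (not part of the diff): the new helper centralizes logic that
# changegroup.changegroupsubset() previously inlined. 'somenode' below is
# a placeholder root:
#
#   out = outgoingbetween(repo, [somenode], repo.heads())
#   out.missing  # descendants of the roots that are ancestors of the heads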
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
                       commoninc=None, portable=False):
    '''Return an outgoing instance to identify the nodes present in repo but
    not in other.

    If onlyheads is given, only nodes ancestral to nodes in onlyheads
    (inclusive) are included. If you already know the local repo's heads,
    passing them in onlyheads is faster than letting them be recomputed here.

    If commoninc is given, it must be the result of a prior call to
    findcommonincoming(repo, other, force) to avoid recomputing it here.

    If portable is given, compute more conservative common and missingheads,
    to make bundles created from the instance more portable.'''
    # declare an empty outgoing object to be filled later
    og = outgoing(repo.changelog, None, None)

    # get common set if not provided
    if commoninc is None:
        commoninc = findcommonincoming(repo, other, force=force)
    og.commonheads, _any, _hds = commoninc

    # compute outgoing
    mayexclude = (repo._phasecache.phaseroots[phases.secret] or repo.obsstore)
    if not mayexclude:
        og.missingheads = onlyheads or repo.heads()
    elif onlyheads is None:
        # use visible heads as it should be cached
        og.missingheads = repo.filtered("served").heads()
        og.excluded = [ctx.node() for ctx in repo.set('secret() or extinct()')]
    else:
        # compute common, missing and exclude secret stuff
        sets = repo.changelog.findcommonmissing(og.commonheads, onlyheads)
        og._common, allmissing = sets
        og._missing = missing = []
        og.excluded = excluded = []
        for node in allmissing:
            ctx = repo[node]
            if ctx.phase() >= phases.secret or ctx.extinct():
                excluded.append(node)
            else:
                missing.append(node)
        if len(missing) == len(allmissing):
            missingheads = onlyheads
        else: # update missing heads
            missingheads = phases.newheads(repo, onlyheads, excluded)
        og.missingheads = missingheads
    if portable:
        # recompute common and missingheads as if -r<rev> had been given for
        # each head of missing, and --base <rev> for each head of the proper
        # ancestors of missing
        og._computecommonmissing()
        cl = repo.changelog
        missingrevs = set(cl.rev(n) for n in og._missing)
        og._common = set(cl.ancestors(missingrevs)) - missingrevs
        commonheads = set(og.commonheads)
        og.missingheads = [h for h in og.missingheads if h not in commonheads]

    return og

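# Reviewer sketch (not part of the original change): a minimal use of
# findcommonoutgoing(), assuming `repo` is a local repository object and
# `other` a peer; the helper name is hypothetical.
def _example_countoutgoing(ui, repo, other):
    """Illustrative only: report how many changesets a push would send."""
    og = findcommonoutgoing(repo, other)
    ui.write("would push %d changesets (%d missing head(s))\n"
             % (len(og.missing), len(og.missingheads)))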
|
def _headssummary(repo, remote, outgoing):
    """compute a summary of branch and heads status before and after push

    return {'branch': ([remoteheads], [newheads], [unsyncedheads])} mapping

    - branch: the branch name
    - remoteheads: the list of remote heads known locally
                   None if the branch is new
    - newheads: the new remote heads (known locally) with outgoing pushed
    - unsyncedheads: the list of remote heads unknown locally.
    """
    cl = repo.changelog
    headssum = {}
    # A. Create set of branches involved in the push.
    branches = set(repo[n].branch() for n in outgoing.missing)
    remotemap = remote.branchmap()
    newbranches = branches - set(remotemap)
    branches.difference_update(newbranches)

    # B. Register remote heads (reuse the branchmap fetched above rather
    # than querying the remote a second time).
    remotebranches = set()
    for branch, heads in remotemap.iteritems():
        remotebranches.add(branch)
        known = []
        unsynced = []
        knownnode = cl.hasnode # do not use nodemap until it is filtered
        for h in heads:
            if knownnode(h):
                known.append(h)
            else:
                unsynced.append(h)
        headssum[branch] = (known, list(known), unsynced)
    # C. Add new branch data.
    missingctx = list(repo[n] for n in outgoing.missing)
    touchedbranches = set()
    for ctx in missingctx:
        branch = ctx.branch()
        touchedbranches.add(branch)
        if branch not in headssum:
            headssum[branch] = (None, [], [])

    # D. Drop data about untouched branches.
    for branch in remotebranches - touchedbranches:
        del headssum[branch]

    # E. Update newmap with outgoing changes.
    # This will possibly add new heads and remove existing ones.
    newmap = branchmap.branchcache((branch, heads[1])
                                   for branch, heads in headssum.iteritems()
                                   if heads[0] is not None)
    newmap.update(repo, (ctx.rev() for ctx in missingctx))
    for branch, newheads in newmap.iteritems():
        headssum[branch][1][:] = newheads
    return headssum

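# Reviewer sketch (not part of the original change): consuming the summary
# returned above. Each value is a (remoteheads, newheads, unsyncedheads)
# tuple; the helper name is hypothetical.
def _example_growingbranches(headssum):
    """Illustrative only: branches that would end up with more heads."""
    return [branch for branch, (remoteheads, newheads, unsyncedheads)
            in headssum.iteritems()
            if remoteheads is not None and len(newheads) > len(remoteheads)]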
|
def _oldheadssummary(repo, remoteheads, outgoing, inc=False):
    """Compute the heads summary for a remote that lacks branchmap support"""

    # 1-4b. old servers: Check for new topological heads.
    # Construct {old,new}map with branch = None (topological branch).
    # (code based on update)
    knownnode = repo.changelog.hasnode # no nodemap until it is filtered
    oldheads = set(h for h in remoteheads if knownnode(h))
    # all nodes in outgoing.missing are children of either:
    # - an element of oldheads
    # - another element of outgoing.missing
    # - nullrev
    # This explains why the new heads are very simple to compute.
    r = repo.set('heads(%ln + %ln)', oldheads, outgoing.missing)
    newheads = list(c.node() for c in r)
    # set some unsynced head to issue the "unsynced changes" warning
    if inc:
        unsynced = set([None])
    else:
        unsynced = set()
    return {None: (oldheads, newheads, unsynced)}

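# Reviewer sketch (not part of the original change): for old servers the
# summary collapses everything into the single topological branch keyed by
# None, so the per-branch loop in checkheads() needs no special casing. The
# helper name is hypothetical.
def _example_oldsummaryshape(repo, remoteheads, outgoing):
    """Illustrative only: unpack the single-entry old-style summary."""
    summary = _oldheadssummary(repo, remoteheads, outgoing, inc=False)
    oldheads, newheads, unsyncedheads = summary[None]
    return oldheads, newheads, unsyncedheads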
|
def _nowarnheads(pushop):
    # Compute newly pushed bookmarks. We don't warn about bookmarked heads.
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    localbookmarks = repo._bookmarks
    remotebookmarks = remote.listkeys('bookmarks')
    bookmarkedheads = set()

    # internal config: bookmarks.pushing
    newbookmarks = [localbookmarks.expandname(b)
                    for b in pushop.ui.configlist('bookmarks', 'pushing')]

    for bm in localbookmarks:
        rnode = remotebookmarks.get(bm)
        if rnode and rnode in repo:
            lctx, rctx = repo[bm], repo[rnode]
            if bookmarks.validdest(repo, rctx, lctx):
                bookmarkedheads.add(lctx.node())
        elif bm in newbookmarks and bm not in remotebookmarks:
            bookmarkedheads.add(repo[bm].node())

    return bookmarkedheads

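# Reviewer sketch (not part of the original change): how the exemption set
# computed above is consumed in checkheads(); arguments are plain sets of
# nodes and the helper name is hypothetical.
def _example_warnableheads(newhs, oldhs, nowarnheads):
    """Illustrative only: new heads that would still trigger a warning once
    bookmarked and pre-existing remote heads are discounted."""
    return sorted(newhs - nowarnheads - oldhs)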
|
def checkheads(pushop):
    """Check that a push won't add any outgoing head

    Raise an Abort error and display a ui message as needed.
    """

    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    outgoing = pushop.outgoing
    remoteheads = pushop.remoteheads
    newbranch = pushop.newbranch
    inc = bool(pushop.incoming)

    # Check for each named branch if we're creating new remote heads.
    # To be a remote head after push, node must be either:
    # - unknown locally
    # - a local outgoing head descended from update
    # - a remote head that's known locally and not
    #   ancestral to an outgoing head
    if remoteheads == [nullid]:
        # remote is empty, nothing to check.
        return

    if remote.capable('branchmap'):
        headssum = _headssummary(repo, remote, outgoing)
    else:
        headssum = _oldheadssummary(repo, remoteheads, outgoing, inc)
    newbranches = [branch for branch, heads in headssum.iteritems()
                   if heads[0] is None]
    # 1. Check for new branches on the remote.
    if newbranches and not newbranch: # new branch requires --new-branch
        branchnames = ', '.join(sorted(newbranches))
        raise error.Abort(_("push creates new remote branches: %s!")
                          % branchnames,
                          hint=_("use 'hg push --new-branch' to create"
                                 " new remote branches"))

    # 2. Find heads that we need not warn about.
    nowarnheads = _nowarnheads(pushop)

    # 3. Check for new heads.
    # If there are more heads after the push than before, a suitable
    # error message, depending on unsynced status, is displayed.
    errormsg = None
    # If there is no obsstore, allfuturecommon won't be used, so no
    # need to compute it.
    if repo.obsstore:
        allmissing = set(outgoing.missing)
        cctx = repo.set('%ld', outgoing.common)
        allfuturecommon = set(c.node() for c in cctx)
        allfuturecommon.update(allmissing)
    for branch, heads in sorted(headssum.iteritems()):
        remoteheads, newheads, unsyncedheads = heads
        candidate_newhs = set(newheads)
        # add unsynced data
        if remoteheads is None:
            oldhs = set()
        else:
            oldhs = set(remoteheads)
        oldhs.update(unsyncedheads)
        candidate_newhs.update(unsyncedheads)
        dhs = None # delta heads, the new heads on branch
        discardedheads = set()
        if not repo.obsstore:
            newhs = candidate_newhs
        else:
            # remove future heads which are actually obsoleted by another
            # pushed element:
            #
            # XXX as above, there are several cases this code does not handle
            # XXX properly
            #
            # (1) if <nh> is public, it won't be affected by obsolete markers
            #     and a new head is created
            #
            # (2) if the new heads have ancestors which are not obsolete and
            #     not ancestors of any other heads we will have a new head too.
            #
            # These two cases will be easy to handle for known changesets but
            # much more tricky for unsynced changes.
            #
            # In addition, this code is confused by prune as it only looks for
            # successors of the heads (none if pruned) leading to issue4354
            newhs = set()
            for nh in candidate_newhs:
                if nh in repo and repo[nh].phase() <= phases.public:
                    newhs.add(nh)
                else:
                    for suc in obsolete.allsuccessors(repo.obsstore, [nh]):
                        if suc != nh and suc in allfuturecommon:
                            discardedheads.add(nh)
                            break
                    else:
                        newhs.add(nh)
        unsynced = sorted(h for h in unsyncedheads if h not in discardedheads)
        if unsynced:
            if None in unsynced:
                # old remote, no heads data
                heads = None
            elif len(unsynced) <= 4 or repo.ui.verbose:
                heads = ' '.join(short(h) for h in unsynced)
            else:
                heads = (' '.join(short(h) for h in unsynced[:4]) +
                         ' ' + _("and %s others") % (len(unsynced) - 4))
            if heads is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally\n"))
            elif branch is None:
                repo.ui.status(_("remote has heads that are "
                                 "not known locally: %s\n") % heads)
            else:
                repo.ui.status(_("remote has heads on branch '%s' that are "
                                 "not known locally: %s\n") % (branch, heads))
        if remoteheads is None:
            if len(newhs) > 1:
                dhs = list(newhs)
                if errormsg is None:
                    errormsg = (_("push creates new branch '%s' "
                                  "with multiple heads") % (branch))
                    hint = _("merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
        elif len(newhs) > len(oldhs):
            # remove bookmarked or existing remote heads from the new heads list
            dhs = sorted(newhs - nowarnheads - oldhs)
        if dhs:
            if errormsg is None:
                if branch not in ('default', None):
                    errormsg = _("push creates new remote head %s "
                                 "on branch '%s'!") % (short(dhs[0]), branch)
                elif repo[dhs[0]].bookmarks():
                    errormsg = _("push creates new remote head %s "
                                 "with bookmark '%s'!") % (
                                 short(dhs[0]), repo[dhs[0]].bookmarks()[0])
                else:
                    errormsg = _("push creates new remote head %s!"
                                 ) % short(dhs[0])
                if unsyncedheads:
                    hint = _("pull and merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
                else:
                    hint = _("merge or"
                             " see \"hg help push\" for details about"
                             " pushing new heads")
            if branch is None:
                repo.ui.note(_("new remote heads:\n"))
            else:
                repo.ui.note(_("new remote heads on branch '%s':\n") % branch)
            for h in dhs:
                repo.ui.note((" %s\n") % short(h))
    if errormsg:
        raise error.Abort(errormsg, hint=hint)
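
# Reviewer sketch (not part of the original change): checkheads() either
# returns None or raises error.Abort carrying the message and hint computed
# above, so callers simply invoke it before sending a bundle. The helper name
# is hypothetical.
def _example_guardpush(pushop):
    """Illustrative only: run the head checks ahead of the actual push."""
    checkheads(pushop)  # raises error.Abort on new branches/heads
    return True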