@@ -1,730 +1,730 b''
|
1 | 1 | # hg.py - hg backend for convert extension |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | # Notes for hg->hg conversion: |
|
9 | 9 | # |
|
10 | 10 | # * Old versions of Mercurial didn't trim the whitespace from the ends |
|
11 | 11 | # of commit messages, but new versions do. Changesets created by |
|
12 | 12 | # those older versions, then converted, may thus have different |
|
13 | 13 | # hashes for changesets that are otherwise identical. |
|
14 | 14 | # |
|
15 | 15 | # * Using "--config convert.hg.saverev=true" will make the source |
|
16 | 16 | # identifier be stored in the converted revision. This will cause |
|
17 | 17 | # the converted revision to have a different identity than the |
|
18 | 18 | # source. |
|
19 | 19 | from __future__ import absolute_import |
|
20 | 20 | |
|
21 | 21 | import os |
|
22 | 22 | import re |
|
23 | 23 | import time |
|
24 | 24 | |
|
25 | 25 | from mercurial.i18n import _ |
|
26 | 26 | from mercurial.pycompat import open |
|
27 | 27 | from mercurial import ( |
|
28 | 28 | bookmarks, |
|
29 | 29 | context, |
|
30 | 30 | error, |
|
31 | 31 | exchange, |
|
32 | 32 | hg, |
|
33 | 33 | lock as lockmod, |
|
34 | 34 | merge as mergemod, |
|
35 | 35 | node as nodemod, |
|
36 | 36 | phases, |
|
37 | 37 | pycompat, |
|
38 | 38 | scmutil, |
|
39 | 39 | util, |
|
40 | 40 | ) |
|
41 | 41 | from mercurial.utils import dateutil |
|
42 | 42 | |
|
43 | 43 | stringio = util.stringio |
|
44 | 44 | |
|
45 | 45 | from . import common |
|
46 | 46 | |
|
47 | 47 | mapfile = common.mapfile |
|
48 | 48 | NoRepo = common.NoRepo |
|
49 | 49 | |
|
50 | 50 | sha1re = re.compile(br'\b[0-9a-f]{12,40}\b') |
|
51 | 51 | |
|
52 | 52 | |
|
53 | 53 | class mercurial_sink(common.converter_sink): |
|
54 | 54 | def __init__(self, ui, repotype, path): |
|
55 | 55 | common.converter_sink.__init__(self, ui, repotype, path) |
|
56 | 56 | self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames') |
|
57 | 57 | self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches') |
|
58 | 58 | self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch') |
|
59 | 59 | self.lastbranch = None |
|
60 | 60 | if os.path.isdir(path) and len(os.listdir(path)) > 0: |
|
61 | 61 | try: |
|
62 | 62 | self.repo = hg.repository(self.ui, path) |
|
63 | 63 | if not self.repo.local(): |
|
64 | 64 | raise NoRepo( |
|
65 | 65 | _(b'%s is not a local Mercurial repository') % path |
|
66 | 66 | ) |
|
67 | 67 | except error.RepoError as err: |
|
68 | 68 | ui.traceback() |
|
69 | 69 | raise NoRepo(err.args[0]) |
|
70 | 70 | else: |
|
71 | 71 | try: |
|
72 | 72 | ui.status(_(b'initializing destination %s repository\n') % path) |
|
73 | 73 | self.repo = hg.repository(self.ui, path, create=True) |
|
74 | 74 | if not self.repo.local(): |
|
75 | 75 | raise NoRepo( |
|
76 | 76 | _(b'%s is not a local Mercurial repository') % path |
|
77 | 77 | ) |
|
78 | 78 | self.created.append(path) |
|
79 | 79 | except error.RepoError: |
|
80 | 80 | ui.traceback() |
|
81 | 81 | raise NoRepo( |
|
82 | 82 | _(b"could not create hg repository %s as sink") % path |
|
83 | 83 | ) |
|
84 | 84 | self.lock = None |
|
85 | 85 | self.wlock = None |
|
86 | 86 | self.filemapmode = False |
|
87 | 87 | self.subrevmaps = {} |
|
88 | 88 | |
|
89 | 89 | def before(self): |
|
90 | 90 | self.ui.debug(b'run hg sink pre-conversion action\n') |
|
91 | 91 | self.wlock = self.repo.wlock() |
|
92 | 92 | self.lock = self.repo.lock() |
|
93 | 93 | |
|
94 | 94 | def after(self): |
|
95 | 95 | self.ui.debug(b'run hg sink post-conversion action\n') |
|
96 | 96 | if self.lock: |
|
97 | 97 | self.lock.release() |
|
98 | 98 | if self.wlock: |
|
99 | 99 | self.wlock.release() |
|
100 | 100 | |
|
101 | 101 | def revmapfile(self): |
|
102 | 102 | return self.repo.vfs.join(b"shamap") |
|
103 | 103 | |
|
104 | 104 | def authorfile(self): |
|
105 | 105 | return self.repo.vfs.join(b"authormap") |
|
106 | 106 | |
|
107 | 107 | def setbranch(self, branch, pbranches): |
|
108 | 108 | if not self.clonebranches: |
|
109 | 109 | return |
|
110 | 110 | |
|
111 | 111 | setbranch = branch != self.lastbranch |
|
112 | 112 | self.lastbranch = branch |
|
113 | 113 | if not branch: |
|
114 | 114 | branch = b'default' |
|
115 | 115 | pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches] |
|
116 | 116 | |
|
117 | 117 | branchpath = os.path.join(self.path, branch) |
|
118 | 118 | if setbranch: |
|
119 | 119 | self.after() |
|
120 | 120 | try: |
|
121 | 121 | self.repo = hg.repository(self.ui, branchpath) |
|
122 | 122 | except Exception: |
|
123 | 123 | self.repo = hg.repository(self.ui, branchpath, create=True) |
|
124 | 124 | self.before() |
|
125 | 125 | |
|
126 | 126 | # pbranches may bring revisions from other branches (merge parents) |
|
127 | 127 | # Make sure we have them, or pull them. |
|
128 | 128 | missings = {} |
|
129 | 129 | for b in pbranches: |
|
130 | 130 | try: |
|
131 | 131 | self.repo.lookup(b[0]) |
|
132 | 132 | except Exception: |
|
133 | 133 | missings.setdefault(b[1], []).append(b[0]) |
|
134 | 134 | |
|
135 | 135 | if missings: |
|
136 | 136 | self.after() |
|
137 | 137 | for pbranch, heads in sorted(pycompat.iteritems(missings)): |
|
138 | 138 | pbranchpath = os.path.join(self.path, pbranch) |
|
139 | 139 | prepo = hg.peer(self.ui, {}, pbranchpath) |
|
140 | 140 | self.ui.note( |
|
141 | 141 | _(b'pulling from %s into %s\n') % (pbranch, branch) |
|
142 | 142 | ) |
|
143 | 143 | exchange.pull( |
|
144 | 144 | self.repo, prepo, [prepo.lookup(h) for h in heads] |
|
145 | 145 | ) |
|
146 | 146 | self.before() |
|
147 | 147 | |
|
148 | 148 | def _rewritetags(self, source, revmap, data): |
|
149 | 149 | fp = stringio() |
|
150 | 150 | for line in data.splitlines(): |
|
151 | 151 | s = line.split(b' ', 1) |
|
152 | 152 | if len(s) != 2: |
|
153 | 153 | self.ui.warn(_(b'invalid tag entry: "%s"\n') % line) |
|
154 | 154 | fp.write(b'%s\n' % line) # Bogus, but keep for hash stability |
|
155 | 155 | continue |
|
156 | 156 | revid = revmap.get(source.lookuprev(s[0])) |
|
157 | 157 | if not revid: |
|
158 | 158 | if s[0] == nodemod.nullhex: |
|
159 | 159 | revid = s[0] |
|
160 | 160 | else: |
|
161 | 161 | # missing, but keep for hash stability |
|
162 | 162 | self.ui.warn(_(b'missing tag entry: "%s"\n') % line) |
|
163 | 163 | fp.write(b'%s\n' % line) |
|
164 | 164 | continue |
|
165 | 165 | fp.write(b'%s %s\n' % (revid, s[1])) |
|
166 | 166 | return fp.getvalue() |
|
167 | 167 | |
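A minimal standalone sketch of the remapping _rewritetags performs above (the two 40-hex nodes and the tiny revmap are hypothetical): each .hgtags line is 'node tag', known nodes are rewritten through the conversion map, and unrecognized lines are kept verbatim so converted hashes stay stable:

    revmap = {b'a' * 40: b'b' * 40}  # source node -> converted node
    data = b'%s v1.0\n%s v2.0\n' % (b'a' * 40, b'f' * 40)
    out = []
    for line in data.splitlines():
        node, tag = line.split(b' ', 1)
        # The unknown b'f...' node falls through unchanged (the real code
        # also emits a warning for it).
        out.append(b'%s %s\n' % (revmap.get(node, node), tag))
    print(b''.join(out))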
|
168 | 168 | def _rewritesubstate(self, source, data): |
|
169 | 169 | fp = stringio() |
|
170 | 170 | for line in data.splitlines(): |
|
171 | 171 | s = line.split(b' ', 1) |
|
172 | 172 | if len(s) != 2: |
|
173 | 173 | continue |
|
174 | 174 | |
|
175 | 175 | revid = s[0] |
|
176 | 176 | subpath = s[1] |
|
177 | 177 | if revid != nodemod.nullhex: |
|
178 | 178 | revmap = self.subrevmaps.get(subpath) |
|
179 | 179 | if revmap is None: |
|
180 | 180 | revmap = mapfile( |
|
181 | 181 | self.ui, self.repo.wjoin(subpath, b'.hg/shamap') |
|
182 | 182 | ) |
|
183 | 183 | self.subrevmaps[subpath] = revmap |
|
184 | 184 | |
|
185 | 185 | # It is reasonable that one or more of the subrepos don't |
|
186 | 186 | # need to be converted, in which case they can be cloned |
|
187 | 187 | # into place instead of converted. Therefore, only warn |
|
188 | 188 | # once. |
|
189 | 189 | msg = _(b'no ".hgsubstate" updates will be made for "%s"\n') |
|
190 | 190 | if len(revmap) == 0: |
|
191 | 191 | sub = self.repo.wvfs.reljoin(subpath, b'.hg') |
|
192 | 192 | |
|
193 | 193 | if self.repo.wvfs.exists(sub): |
|
194 | 194 | self.ui.warn(msg % subpath) |
|
195 | 195 | |
|
196 | 196 | newid = revmap.get(revid) |
|
197 | 197 | if not newid: |
|
198 | 198 | if len(revmap) > 0: |
|
199 | 199 | self.ui.warn( |
|
200 | 200 | _(b"%s is missing from %s/.hg/shamap\n") |
|
201 | 201 | % (revid, subpath) |
|
202 | 202 | ) |
|
203 | 203 | else: |
|
204 | 204 | revid = newid |
|
205 | 205 | |
|
206 | 206 | fp.write(b'%s %s\n' % (revid, subpath)) |
|
207 | 207 | |
|
208 | 208 | return fp.getvalue() |
|
209 | 209 | |
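Likewise, each .hgsubstate line handled by _rewritesubstate above is 'node subrepo-path', and the node is remapped through that subrepo's own shamap. A hedged sketch with a hypothetical one-entry map:

    submaps = {b'libs/foo': {b'1' * 40: b'2' * 40}}  # subpath -> shamap
    line = b'%s libs/foo' % (b'1' * 40,)
    node, subpath = line.split(b' ', 1)
    # Nodes missing from the map are left as-is (the real code warns).
    newnode = submaps.get(subpath, {}).get(node, node)
    print(b'%s %s' % (newnode, subpath))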
|
210 | 210 | def _calculatemergedfiles(self, source, p1ctx, p2ctx): |
|
211 | 211 | """Calculates the files from p2 that we need to pull in when merging p1 |
|
212 | 212 | and p2, given that the merge is coming from the given source. |
|
213 | 213 | |
|
214 | 214 | This prevents us from losing files that only exist in the target p2 and |
|
215 | 215 | that don't come from the source repo (like if you're merging multiple |
|
216 | 216 | repositories together). |
|
217 | 217 | """ |
|
218 | 218 | anc = [p1ctx.ancestor(p2ctx)] |
|
219 | 219 | # Calculate what files are coming from p2 |
|
220 | 220 | # TODO: mresult.commitinfo might be able to get that info |
|
221 | 221 | mresult = mergemod.calculateupdates( |
|
222 | 222 | self.repo, |
|
223 | 223 | p1ctx, |
|
224 | 224 | p2ctx, |
|
225 | 225 | anc, |
|
226 | 226 | branchmerge=True, |
|
227 | 227 | force=True, |
|
228 | 228 | acceptremote=False, |
|
229 | 229 | followcopies=False, |
|
230 | 230 | ) |
|
231 | 231 | |
|
232 | 232 | for file, (action, info, msg) in mresult.filemap(): |
|
233 | 233 | if source.targetfilebelongstosource(file): |
|
234 | 234 | # If the file belongs to the source repo, ignore the p2 |
|
235 | 235 | # since it will be covered by the existing fileset. |
|
236 | 236 | continue |
|
237 | 237 | |
|
238 | 238 | # If the file requires actual merging, abort. We don't have enough |
|
239 | 239 | # context to resolve merges correctly. |
|
240 | 240 | if action in [b'm', b'dm', b'cd', b'dc']: |
|
241 | 241 | raise error.Abort( |
|
242 | 242 | _( |
|
243 | 243 | b"unable to convert merge commit " |
|
244 | 244 | b"since target parents do not merge cleanly (file " |
|
245 | 245 | b"%s, parents %s and %s)" |
|
246 | 246 | ) |
|
247 | 247 | % (file, p1ctx, p2ctx) |
|
248 | 248 | ) |
|
249 | 249 | elif action == b'k': |
|
250 | 250 | # 'keep' means nothing changed from p1 |
|
251 | 251 | continue |
|
252 | 252 | else: |
|
253 | 253 | # Any other change means we want to take the p2 version |
|
254 | 254 | yield file |
|
255 | 255 | |
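A toy dispatch over hypothetical (file, action) pairs mirroring the policy in _calculatemergedfiles above: merge-like actions abort the conversion, 'k' (keep) means nothing changed from p1, and any other action takes the p2 version:

    mergelike = {b'm', b'dm', b'cd', b'dc'}
    actions = [(b'a.txt', b'g'), (b'b.txt', b'k'), (b'c.txt', b'g')]
    assert not any(a in mergelike for _, a in actions), 'would abort'
    taken = [f for f, a in actions if a != b'k']
    print(taken)  # [b'a.txt', b'c.txt'] are pulled in from p2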
|
256 | 256 | def putcommit( |
|
257 | 257 | self, files, copies, parents, commit, source, revmap, full, cleanp2 |
|
258 | 258 | ): |
|
259 | 259 | files = dict(files) |
|
260 | 260 | |
|
261 | 261 | def getfilectx(repo, memctx, f): |
|
262 | 262 | if p2ctx and f in p2files and f not in copies: |
|
263 | 263 | self.ui.debug(b'reusing %s from p2\n' % f) |
|
264 | 264 | try: |
|
265 | 265 | return p2ctx[f] |
|
266 | 266 | except error.ManifestLookupError: |
|
267 | 267 | # If the file doesn't exist in p2, then we're syncing a |
|
268 | 268 | # delete, so just return None. |
|
269 | 269 | return None |
|
270 | 270 | try: |
|
271 | 271 | v = files[f] |
|
272 | 272 | except KeyError: |
|
273 | 273 | return None |
|
274 | 274 | data, mode = source.getfile(f, v) |
|
275 | 275 | if data is None: |
|
276 | 276 | return None |
|
277 | 277 | if f == b'.hgtags': |
|
278 | 278 | data = self._rewritetags(source, revmap, data) |
|
279 | 279 | if f == b'.hgsubstate': |
|
280 | 280 | data = self._rewritesubstate(source, data) |
|
281 | 281 | return context.memfilectx( |
|
282 | 282 | self.repo, |
|
283 | 283 | memctx, |
|
284 | 284 | f, |
|
285 | 285 | data, |
|
286 | 286 | b'l' in mode, |
|
287 | 287 | b'x' in mode, |
|
288 | 288 | copies.get(f), |
|
289 | 289 | ) |
|
290 | 290 | |
|
291 | 291 | pl = [] |
|
292 | 292 | for p in parents: |
|
293 | 293 | if p not in pl: |
|
294 | 294 | pl.append(p) |
|
295 | 295 | parents = pl |
|
296 | 296 | nparents = len(parents) |
|
297 | 297 | if self.filemapmode and nparents == 1: |
|
298 | 298 | m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0] |
|
299 | 299 | parent = parents[0] |
|
300 | 300 | |
|
301 | 301 | if len(parents) < 2: |
|
302 | 302 | parents.append(nodemod.nullid) |
|
303 | 303 | if len(parents) < 2: |
|
304 | 304 | parents.append(nodemod.nullid) |
|
305 | 305 | p2 = parents.pop(0) |
|
306 | 306 | |
|
307 | 307 | text = commit.desc |
|
308 | 308 | |
|
309 | 309 | sha1s = re.findall(sha1re, text) |
|
310 | 310 | for sha1 in sha1s: |
|
311 | 311 | oldrev = source.lookuprev(sha1) |
|
312 | 312 | newrev = revmap.get(oldrev) |
|
313 | 313 | if newrev is not None: |
|
314 | 314 | text = text.replace(sha1, newrev[: len(sha1)]) |
|
315 | 315 | |
|
316 | 316 | extra = commit.extra.copy() |
|
317 | 317 | |
|
318 | 318 | sourcename = self.repo.ui.config(b'convert', b'hg.sourcename') |
|
319 | 319 | if sourcename: |
|
320 | 320 | extra[b'convert_source'] = sourcename |
|
321 | 321 | |
|
322 | 322 | for label in ( |
|
323 | 323 | b'source', |
|
324 | 324 | b'transplant_source', |
|
325 | 325 | b'rebase_source', |
|
326 | 326 | b'intermediate-source', |
|
327 | 327 | ): |
|
328 | 328 | node = extra.get(label) |
|
329 | 329 | |
|
330 | 330 | if node is None: |
|
331 | 331 | continue |
|
332 | 332 | |
|
333 | 333 | # Only transplant stores its reference in binary |
|
334 | 334 | if label == b'transplant_source': |
|
335 | 335 | node = nodemod.hex(node) |
|
336 | 336 | |
|
337 | 337 | newrev = revmap.get(node) |
|
338 | 338 | if newrev is not None: |
|
339 | 339 | if label == b'transplant_source': |
|
340 | 340 | newrev = nodemod.bin(newrev) |
|
341 | 341 | |
|
342 | 342 | extra[label] = newrev |
|
343 | 343 | |
|
344 | 344 | if self.branchnames and commit.branch: |
|
345 | 345 | extra[b'branch'] = commit.branch |
|
346 | 346 | if commit.rev and commit.saverev: |
|
347 | 347 | extra[b'convert_revision'] = commit.rev |
|
348 | 348 | |
|
349 | 349 | while parents: |
|
350 | 350 | p1 = p2 |
|
351 | 351 | p2 = parents.pop(0) |
|
352 | 352 | p1ctx = self.repo[p1] |
|
353 | 353 | p2ctx = None |
|
354 | 354 | if p2 != nodemod.nullid: |
|
355 | 355 | p2ctx = self.repo[p2] |
|
356 | 356 | fileset = set(files) |
|
357 | 357 | if full: |
|
358 | 358 | fileset.update(self.repo[p1]) |
|
359 | 359 | fileset.update(self.repo[p2]) |
|
360 | 360 | |
|
361 | 361 | if p2ctx: |
|
362 | 362 | p2files = set(cleanp2) |
|
363 | 363 | for file in self._calculatemergedfiles(source, p1ctx, p2ctx): |
|
364 | 364 | p2files.add(file) |
|
365 | 365 | fileset.add(file) |
|
366 | 366 | |
|
367 | 367 | ctx = context.memctx( |
|
368 | 368 | self.repo, |
|
369 | 369 | (p1, p2), |
|
370 | 370 | text, |
|
371 | 371 | fileset, |
|
372 | 372 | getfilectx, |
|
373 | 373 | commit.author, |
|
374 | 374 | commit.date, |
|
375 | 375 | extra, |
|
376 | 376 | ) |
|
377 | 377 | |
|
378 | 378 | # We won't know if the conversion changes the node until after the |
|
379 | 379 | # commit, so copy the source's phase for now. |
|
380 | 380 | self.repo.ui.setconfig( |
|
381 | 381 | b'phases', |
|
382 | 382 | b'new-commit', |
|
383 | 383 | phases.phasenames[commit.phase], |
|
384 | 384 | b'convert', |
|
385 | 385 | ) |
|
386 | 386 | |
|
387 | 387 | with self.repo.transaction(b"convert") as tr: |
|
388 | 388 | if self.repo.ui.config(b'convert', b'hg.preserve-hash'): |
|
389 | 389 | origctx = commit.ctx |
|
390 | 390 | else: |
|
391 | 391 | origctx = None |
|
392 | 392 | node = nodemod.hex(self.repo.commitctx(ctx, origctx=origctx)) |
|
393 | 393 | |
|
394 | 394 | # If the node value has changed, but the phase is lower than |
|
395 | 395 | # draft, set it back to draft since it hasn't been exposed |
|
396 | 396 | # anywhere. |
|
397 | 397 | if commit.rev != node: |
|
398 | 398 | ctx = self.repo[node] |
|
399 | 399 | if ctx.phase() < phases.draft: |
|
400 | 400 | phases.registernew( |
|
401 | | self.repo, tr, phases.draft, [ctx. |

| 401 | self.repo, tr, phases.draft, [ctx.rev()] |
|
402 | 402 | ) |
|
403 | 403 | |
|
404 | 404 | text = b"(octopus merge fixup)\n" |
|
405 | 405 | p2 = node |
|
406 | 406 | |
|
407 | 407 | if self.filemapmode and nparents == 1: |
|
408 | 408 | man = self.repo.manifestlog.getstorage(b'') |
|
409 | 409 | mnode = self.repo.changelog.read(nodemod.bin(p2))[0] |
|
410 | 410 | closed = b'close' in commit.extra |
|
411 | 411 | if not closed and not man.cmp(m1node, man.revision(mnode)): |
|
412 | 412 | self.ui.status(_(b"filtering out empty revision\n")) |
|
413 | 413 | self.repo.rollback(force=True) |
|
414 | 414 | return parent |
|
415 | 415 | return p2 |
|
416 | 416 | |
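The sha1 rewriting inside putcommit() (the sha1re loop) can be sketched standalone; the hashes here are hypothetical, and source.lookuprev()'s prefix expansion is emulated with a dict scan:

    import re

    sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')
    old, new = b'c' * 40, b'd' * 40
    revmap = {old: new}
    text = b'Backout of %s, see notes.' % (old[:12],)
    for sha1 in sha1re.findall(text):
        full = next((k for k in revmap if k.startswith(sha1)), None)
        if full is not None:
            # Keep the original abbreviation length, as the real code does.
            text = text.replace(sha1, revmap[full][:len(sha1)])
    print(text)  # the 12-char prefix now names the converted revision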
|
417 | 417 | def puttags(self, tags): |
|
418 | 418 | tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True) |
|
419 | 419 | tagparent = tagparent or nodemod.nullid |
|
420 | 420 | |
|
421 | 421 | oldlines = set() |
|
422 | 422 | for branch, heads in pycompat.iteritems(self.repo.branchmap()): |
|
423 | 423 | for h in heads: |
|
424 | 424 | if b'.hgtags' in self.repo[h]: |
|
425 | 425 | oldlines.update( |
|
426 | 426 | set(self.repo[h][b'.hgtags'].data().splitlines(True)) |
|
427 | 427 | ) |
|
428 | 428 | oldlines = sorted(list(oldlines)) |
|
429 | 429 | |
|
430 | 430 | newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags]) |
|
431 | 431 | if newlines == oldlines: |
|
432 | 432 | return None, None |
|
433 | 433 | |
|
434 | 434 | # if the old and new tags match, then there is nothing to update |
|
435 | 435 | oldtags = set() |
|
436 | 436 | newtags = set() |
|
437 | 437 | for line in oldlines: |
|
438 | 438 | s = line.strip().split(b' ', 1) |
|
439 | 439 | if len(s) != 2: |
|
440 | 440 | continue |
|
441 | 441 | oldtags.add(s[1]) |
|
442 | 442 | for line in newlines: |
|
443 | 443 | s = line.strip().split(b' ', 1) |
|
444 | 444 | if len(s) != 2: |
|
445 | 445 | continue |
|
446 | 446 | if s[1] not in oldtags: |
|
447 | 447 | newtags.add(s[1].strip()) |
|
448 | 448 | |
|
449 | 449 | if not newtags: |
|
450 | 450 | return None, None |
|
451 | 451 | |
|
452 | 452 | data = b"".join(newlines) |
|
453 | 453 | |
|
454 | 454 | def getfilectx(repo, memctx, f): |
|
455 | 455 | return context.memfilectx(repo, memctx, f, data, False, False, None) |
|
456 | 456 | |
|
457 | 457 | self.ui.status(_(b"updating tags\n")) |
|
458 | 458 | date = b"%d 0" % int(time.mktime(time.gmtime())) |
|
459 | 459 | extra = {b'branch': self.tagsbranch} |
|
460 | 460 | ctx = context.memctx( |
|
461 | 461 | self.repo, |
|
462 | 462 | (tagparent, None), |
|
463 | 463 | b"update tags", |
|
464 | 464 | [b".hgtags"], |
|
465 | 465 | getfilectx, |
|
466 | 466 | b"convert-repo", |
|
467 | 467 | date, |
|
468 | 468 | extra, |
|
469 | 469 | ) |
|
470 | 470 | node = self.repo.commitctx(ctx) |
|
471 | 471 | return nodemod.hex(node), nodemod.hex(tagparent) |
|
472 | 472 | |
|
473 | 473 | def setfilemapmode(self, active): |
|
474 | 474 | self.filemapmode = active |
|
475 | 475 | |
|
476 | 476 | def putbookmarks(self, updatedbookmark): |
|
477 | 477 | if not len(updatedbookmark): |
|
478 | 478 | return |
|
479 | 479 | wlock = lock = tr = None |
|
480 | 480 | try: |
|
481 | 481 | wlock = self.repo.wlock() |
|
482 | 482 | lock = self.repo.lock() |
|
483 | 483 | tr = self.repo.transaction(b'bookmark') |
|
484 | 484 | self.ui.status(_(b"updating bookmarks\n")) |
|
485 | 485 | destmarks = self.repo._bookmarks |
|
486 | 486 | changes = [ |
|
487 | 487 | (bookmark, nodemod.bin(updatedbookmark[bookmark])) |
|
488 | 488 | for bookmark in updatedbookmark |
|
489 | 489 | ] |
|
490 | 490 | destmarks.applychanges(self.repo, tr, changes) |
|
491 | 491 | tr.close() |
|
492 | 492 | finally: |
|
493 | 493 | lockmod.release(lock, wlock, tr) |
|
494 | 494 | |
|
495 | 495 | def hascommitfrommap(self, rev): |
|
496 | 496 | # the exact semantics of clonebranches is unclear so we can't say no |
|
497 | 497 | return rev in self.repo or self.clonebranches |
|
498 | 498 | |
|
499 | 499 | def hascommitforsplicemap(self, rev): |
|
500 | 500 | if rev not in self.repo and self.clonebranches: |
|
501 | 501 | raise error.Abort( |
|
502 | 502 | _( |
|
503 | 503 | b'revision %s not found in destination ' |
|
504 | 504 | b'repository (lookups with clonebranches=true ' |
|
505 | 505 | b'are not implemented)' |
|
506 | 506 | ) |
|
507 | 507 | % rev |
|
508 | 508 | ) |
|
509 | 509 | return rev in self.repo |
|
510 | 510 | |
|
511 | 511 | |
|
512 | 512 | class mercurial_source(common.converter_source): |
|
513 | 513 | def __init__(self, ui, repotype, path, revs=None): |
|
514 | 514 | common.converter_source.__init__(self, ui, repotype, path, revs) |
|
515 | 515 | self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors') |
|
516 | 516 | self.ignored = set() |
|
517 | 517 | self.saverev = ui.configbool(b'convert', b'hg.saverev') |
|
518 | 518 | try: |
|
519 | 519 | self.repo = hg.repository(self.ui, path) |
|
520 | 520 | # try to provoke an exception if this isn't really a hg |
|
521 | 521 | # repo, but some other bogus compatible-looking url |
|
522 | 522 | if not self.repo.local(): |
|
523 | 523 | raise error.RepoError |
|
524 | 524 | except error.RepoError: |
|
525 | 525 | ui.traceback() |
|
526 | 526 | raise NoRepo(_(b"%s is not a local Mercurial repository") % path) |
|
527 | 527 | self.lastrev = None |
|
528 | 528 | self.lastctx = None |
|
529 | 529 | self._changescache = None, None |
|
530 | 530 | self.convertfp = None |
|
531 | 531 | # Restrict converted revisions to startrev descendants |
|
532 | 532 | startnode = ui.config(b'convert', b'hg.startrev') |
|
533 | 533 | hgrevs = ui.config(b'convert', b'hg.revs') |
|
534 | 534 | if hgrevs is None: |
|
535 | 535 | if startnode is not None: |
|
536 | 536 | try: |
|
537 | 537 | startnode = self.repo.lookup(startnode) |
|
538 | 538 | except error.RepoError: |
|
539 | 539 | raise error.Abort( |
|
540 | 540 | _(b'%s is not a valid start revision') % startnode |
|
541 | 541 | ) |
|
542 | 542 | startrev = self.repo.changelog.rev(startnode) |
|
543 | 543 | children = {startnode: 1} |
|
544 | 544 | for r in self.repo.changelog.descendants([startrev]): |
|
545 | 545 | children[self.repo.changelog.node(r)] = 1 |
|
546 | 546 | self.keep = children.__contains__ |
|
547 | 547 | else: |
|
548 | 548 | self.keep = util.always |
|
549 | 549 | if revs: |
|
550 | 550 | self._heads = [self.repo.lookup(r) for r in revs] |
|
551 | 551 | else: |
|
552 | 552 | self._heads = self.repo.heads() |
|
553 | 553 | else: |
|
554 | 554 | if revs or startnode is not None: |
|
555 | 555 | raise error.Abort( |
|
556 | 556 | _( |
|
557 | 557 | b'hg.revs cannot be combined with ' |
|
558 | 558 | b'hg.startrev or --rev' |
|
559 | 559 | ) |
|
560 | 560 | ) |
|
561 | 561 | nodes = set() |
|
562 | 562 | parents = set() |
|
563 | 563 | for r in scmutil.revrange(self.repo, [hgrevs]): |
|
564 | 564 | ctx = self.repo[r] |
|
565 | 565 | nodes.add(ctx.node()) |
|
566 | 566 | parents.update(p.node() for p in ctx.parents()) |
|
567 | 567 | self.keep = nodes.__contains__ |
|
568 | 568 | self._heads = nodes - parents |
|
569 | 569 | |
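The head computation closing the hg.revs branch above is plain set arithmetic: the heads of the selected set are the selected revisions that are not a parent of any other selected revision. A tiny sketch with hypothetical revision numbers:

    selected = {3: (1,), 4: (3,), 5: (3,)}  # rev -> its parent revs
    nodes = set(selected)
    parents = {p for ps in selected.values() for p in ps}
    print(sorted(nodes - parents))  # [4, 5]: revisions with no selected child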
|
570 | 570 | def _changectx(self, rev): |
|
571 | 571 | if self.lastrev != rev: |
|
572 | 572 | self.lastctx = self.repo[rev] |
|
573 | 573 | self.lastrev = rev |
|
574 | 574 | return self.lastctx |
|
575 | 575 | |
|
576 | 576 | def _parents(self, ctx): |
|
577 | 577 | return [p for p in ctx.parents() if p and self.keep(p.node())] |
|
578 | 578 | |
|
579 | 579 | def getheads(self): |
|
580 | 580 | return [nodemod.hex(h) for h in self._heads if self.keep(h)] |
|
581 | 581 | |
|
582 | 582 | def getfile(self, name, rev): |
|
583 | 583 | try: |
|
584 | 584 | fctx = self._changectx(rev)[name] |
|
585 | 585 | return fctx.data(), fctx.flags() |
|
586 | 586 | except error.LookupError: |
|
587 | 587 | return None, None |
|
588 | 588 | |
|
589 | 589 | def _changedfiles(self, ctx1, ctx2): |
|
590 | 590 | ma, r = [], [] |
|
591 | 591 | maappend = ma.append |
|
592 | 592 | rappend = r.append |
|
593 | 593 | d = ctx1.manifest().diff(ctx2.manifest()) |
|
594 | 594 | for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d): |
|
595 | 595 | if node2 is None: |
|
596 | 596 | rappend(f) |
|
597 | 597 | else: |
|
598 | 598 | maappend(f) |
|
599 | 599 | return ma, r |
|
600 | 600 | |
|
601 | 601 | def getchanges(self, rev, full): |
|
602 | 602 | ctx = self._changectx(rev) |
|
603 | 603 | parents = self._parents(ctx) |
|
604 | 604 | if full or not parents: |
|
605 | 605 | files = copyfiles = ctx.manifest() |
|
606 | 606 | if parents: |
|
607 | 607 | if self._changescache[0] == rev: |
|
608 | 608 | ma, r = self._changescache[1] |
|
609 | 609 | else: |
|
610 | 610 | ma, r = self._changedfiles(parents[0], ctx) |
|
611 | 611 | if not full: |
|
612 | 612 | files = ma + r |
|
613 | 613 | copyfiles = ma |
|
614 | 614 | # _getcopies() is also run for roots and before filtering so missing |
|
615 | 615 | # revlogs are detected early |
|
616 | 616 | copies = self._getcopies(ctx, parents, copyfiles) |
|
617 | 617 | cleanp2 = set() |
|
618 | 618 | if len(parents) == 2: |
|
619 | 619 | d = parents[1].manifest().diff(ctx.manifest(), clean=True) |
|
620 | 620 | for f, value in pycompat.iteritems(d): |
|
621 | 621 | if value is None: |
|
622 | 622 | cleanp2.add(f) |
|
623 | 623 | changes = [(f, rev) for f in files if f not in self.ignored] |
|
624 | 624 | changes.sort() |
|
625 | 625 | return changes, copies, cleanp2 |
|
626 | 626 | |
|
627 | 627 | def _getcopies(self, ctx, parents, files): |
|
628 | 628 | copies = {} |
|
629 | 629 | for name in files: |
|
630 | 630 | if name in self.ignored: |
|
631 | 631 | continue |
|
632 | 632 | try: |
|
633 | 633 | copysource = ctx.filectx(name).copysource() |
|
634 | 634 | if copysource in self.ignored: |
|
635 | 635 | continue |
|
636 | 636 | # Ignore copy sources not in parent revisions |
|
637 | 637 | if not any(copysource in p for p in parents): |
|
638 | 638 | continue |
|
639 | 639 | copies[name] = copysource |
|
640 | 640 | except TypeError: |
|
641 | 641 | pass |
|
642 | 642 | except error.LookupError as e: |
|
643 | 643 | if not self.ignoreerrors: |
|
644 | 644 | raise |
|
645 | 645 | self.ignored.add(name) |
|
646 | 646 | self.ui.warn(_(b'ignoring: %s\n') % e) |
|
647 | 647 | return copies |
|
648 | 648 | |
|
649 | 649 | def getcommit(self, rev): |
|
650 | 650 | ctx = self._changectx(rev) |
|
651 | 651 | _parents = self._parents(ctx) |
|
652 | 652 | parents = [p.hex() for p in _parents] |
|
653 | 653 | optparents = [p.hex() for p in ctx.parents() if p and p not in _parents] |
|
654 | 654 | crev = rev |
|
655 | 655 | |
|
656 | 656 | return common.commit( |
|
657 | 657 | author=ctx.user(), |
|
658 | 658 | date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'), |
|
659 | 659 | desc=ctx.description(), |
|
660 | 660 | rev=crev, |
|
661 | 661 | parents=parents, |
|
662 | 662 | optparents=optparents, |
|
663 | 663 | branch=ctx.branch(), |
|
664 | 664 | extra=ctx.extra(), |
|
665 | 665 | sortkey=ctx.rev(), |
|
666 | 666 | saverev=self.saverev, |
|
667 | 667 | phase=ctx.phase(), |
|
668 | 668 | ctx=ctx, |
|
669 | 669 | ) |
|
670 | 670 | |
|
671 | 671 | def numcommits(self): |
|
672 | 672 | return len(self.repo) |
|
673 | 673 | |
|
674 | 674 | def gettags(self): |
|
675 | 675 | # This will get written to .hgtags; filter non-global tags out. |
|
676 | 676 | tags = [ |
|
677 | 677 | t |
|
678 | 678 | for t in self.repo.tagslist() |
|
679 | 679 | if self.repo.tagtype(t[0]) == b'global' |
|
680 | 680 | ] |
|
681 | 681 | return { |
|
682 | 682 | name: nodemod.hex(node) for name, node in tags if self.keep(node) |
|
683 | 683 | } |
|
684 | 684 | |
|
685 | 685 | def getchangedfiles(self, rev, i): |
|
686 | 686 | ctx = self._changectx(rev) |
|
687 | 687 | parents = self._parents(ctx) |
|
688 | 688 | if not parents and i is None: |
|
689 | 689 | i = 0 |
|
690 | 690 | ma, r = ctx.manifest().keys(), [] |
|
691 | 691 | else: |
|
692 | 692 | i = i or 0 |
|
693 | 693 | ma, r = self._changedfiles(parents[i], ctx) |
|
694 | 694 | ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)] |
|
695 | 695 | |
|
696 | 696 | if i == 0: |
|
697 | 697 | self._changescache = (rev, (ma, r)) |
|
698 | 698 | |
|
699 | 699 | return ma + r |
|
700 | 700 | |
|
701 | 701 | def converted(self, rev, destrev): |
|
702 | 702 | if self.convertfp is None: |
|
703 | 703 | self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab') |
|
704 | 704 | self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev))) |
|
705 | 705 | self.convertfp.flush() |
|
706 | 706 | |
|
707 | 707 | def before(self): |
|
708 | 708 | self.ui.debug(b'run hg source pre-conversion action\n') |
|
709 | 709 | |
|
710 | 710 | def after(self): |
|
711 | 711 | self.ui.debug(b'run hg source post-conversion action\n') |
|
712 | 712 | |
|
713 | 713 | def hasnativeorder(self): |
|
714 | 714 | return True |
|
715 | 715 | |
|
716 | 716 | def hasnativeclose(self): |
|
717 | 717 | return True |
|
718 | 718 | |
|
719 | 719 | def lookuprev(self, rev): |
|
720 | 720 | try: |
|
721 | 721 | return nodemod.hex(self.repo.lookup(rev)) |
|
722 | 722 | except (error.RepoError, error.LookupError): |
|
723 | 723 | return None |
|
724 | 724 | |
|
725 | 725 | def getbookmarks(self): |
|
726 | 726 | return bookmarks.listbookmarks(self.repo) |
|
727 | 727 | |
|
728 | 728 | def checkrevformat(self, revstr, mapname=b'splicemap'): |
|
729 | 729 | """ For Mercurial, a revision string is a 40-byte hex hash """ |
|
730 | 730 | self.checkhexformat(revstr, mapname) |
@@ -1,1703 +1,1703 b''
|
1 | 1 | # changegroup.py - Mercurial changegroup manipulation functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import os |
|
11 | 11 | import struct |
|
12 | 12 | import weakref |
|
13 | 13 | |
|
14 | 14 | from .i18n import _ |
|
15 | 15 | from .node import ( |
|
16 | 16 | hex, |
|
17 | 17 | nullid, |
|
18 | 18 | nullrev, |
|
19 | 19 | short, |
|
20 | 20 | ) |
|
21 | 21 | from .pycompat import open |
|
22 | 22 | |
|
23 | 23 | from . import ( |
|
24 | 24 | error, |
|
25 | 25 | match as matchmod, |
|
26 | 26 | mdiff, |
|
27 | 27 | phases, |
|
28 | 28 | pycompat, |
|
29 | 29 | requirements, |
|
30 | 30 | scmutil, |
|
31 | 31 | util, |
|
32 | 32 | ) |
|
33 | 33 | |
|
34 | 34 | from .interfaces import repository |
|
35 | 35 | |
|
36 | 36 | _CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s") |
|
37 | 37 | _CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s") |
|
38 | 38 | _CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH") |
|
39 | 39 | |
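For orientation, the header sizes these formats imply (field order per the _deltaheader() methods below: cg1 carries node/p1/p2/linknode, cg2 inserts the delta base before the linknode, cg3 appends a 16-bit flags field):

    import struct

    v1 = struct.Struct(b"20s20s20s20s")
    v2 = struct.Struct(b"20s20s20s20s20s")
    v3 = struct.Struct(b">20s20s20s20s20sH")
    print(v1.size, v2.size, v3.size)  # 80 100 102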
|
40 | 40 | LFS_REQUIREMENT = b'lfs' |
|
41 | 41 | |
|
42 | 42 | readexactly = util.readexactly |
|
43 | 43 | |
|
44 | 44 | |
|
45 | 45 | def getchunk(stream): |
|
46 | 46 | """return the next chunk from stream as a string""" |
|
47 | 47 | d = readexactly(stream, 4) |
|
48 | 48 | l = struct.unpack(b">l", d)[0] |
|
49 | 49 | if l <= 4: |
|
50 | 50 | if l: |
|
51 | 51 | raise error.Abort(_(b"invalid chunk length %d") % l) |
|
52 | 52 | return b"" |
|
53 | 53 | return readexactly(stream, l - 4) |
|
54 | 54 | |
|
55 | 55 | |
|
56 | 56 | def chunkheader(length): |
|
57 | 57 | """return a changegroup chunk header (string)""" |
|
58 | 58 | return struct.pack(b">l", length + 4) |
|
59 | 59 | |
|
60 | 60 | |
|
61 | 61 | def closechunk(): |
|
62 | 62 | """return a changegroup chunk header (string) for a zero-length chunk""" |
|
63 | 63 | return struct.pack(b">l", 0) |
|
64 | 64 | |
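A round-trip of the chunk framing defined above, using io.BytesIO as the stream: the length prefix counts its own four bytes, and a zero-length header marks the end of a group:

    import io
    import struct

    payload = b'hello'
    stream = io.BytesIO(
        struct.pack(b">l", len(payload) + 4)  # chunkheader(len(payload))
        + payload
        + struct.pack(b">l", 0)               # closechunk()
    )
    l = struct.unpack(b">l", stream.read(4))[0]
    print(stream.read(l - 4))                       # b'hello'
    print(struct.unpack(b">l", stream.read(4))[0])  # 0 -> end of group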
|
65 | 65 | |
|
66 | 66 | def _fileheader(path): |
|
67 | 67 | """Obtain a changegroup chunk header for a named path.""" |
|
68 | 68 | return chunkheader(len(path)) + path |
|
69 | 69 | |
|
70 | 70 | |
|
71 | 71 | def writechunks(ui, chunks, filename, vfs=None): |
|
72 | 72 | """Write chunks to a file and return its filename. |
|
73 | 73 | |
|
74 | 74 | The stream is assumed to be a bundle file. |
|
75 | 75 | Existing files will not be overwritten. |
|
76 | 76 | If no filename is specified, a temporary file is created. |
|
77 | 77 | """ |
|
78 | 78 | fh = None |
|
79 | 79 | cleanup = None |
|
80 | 80 | try: |
|
81 | 81 | if filename: |
|
82 | 82 | if vfs: |
|
83 | 83 | fh = vfs.open(filename, b"wb") |
|
84 | 84 | else: |
|
85 | 85 | # Increase default buffer size because default is usually |
|
86 | 86 | # small (4k is common on Linux). |
|
87 | 87 | fh = open(filename, b"wb", 131072) |
|
88 | 88 | else: |
|
89 | 89 | fd, filename = pycompat.mkstemp(prefix=b"hg-bundle-", suffix=b".hg") |
|
90 | 90 | fh = os.fdopen(fd, "wb") |
|
91 | 91 | cleanup = filename |
|
92 | 92 | for c in chunks: |
|
93 | 93 | fh.write(c) |
|
94 | 94 | cleanup = None |
|
95 | 95 | return filename |
|
96 | 96 | finally: |
|
97 | 97 | if fh is not None: |
|
98 | 98 | fh.close() |
|
99 | 99 | if cleanup is not None: |
|
100 | 100 | if filename and vfs: |
|
101 | 101 | vfs.unlink(cleanup) |
|
102 | 102 | else: |
|
103 | 103 | os.unlink(cleanup) |
|
104 | 104 | |
|
105 | 105 | |
|
106 | 106 | class cg1unpacker(object): |
|
107 | 107 | """Unpacker for cg1 changegroup streams. |
|
108 | 108 | |
|
109 | 109 | A changegroup unpacker handles the framing of the revision data in |
|
110 | 110 | the wire format. Most consumers will want to use the apply() |
|
111 | 111 | method to add the changes from the changegroup to a repository. |
|
112 | 112 | |
|
113 | 113 | If you're forwarding a changegroup unmodified to another consumer, |
|
114 | 114 | use getchunks(), which returns an iterator of changegroup |
|
115 | 115 | chunks. This is mostly useful for cases where you need to know the |
|
116 | 116 | data stream has ended by observing the end of the changegroup. |
|
117 | 117 | |
|
118 | 118 | deltachunk() is useful only if you're applying delta data. Most |
|
119 | 119 | consumers should prefer apply() instead. |
|
120 | 120 | |
|
121 | 121 | A few other public methods exist. Those are used only for |
|
122 | 122 | bundlerepo and some debug commands - their use is discouraged. |
|
123 | 123 | """ |
|
124 | 124 | |
|
125 | 125 | deltaheader = _CHANGEGROUPV1_DELTA_HEADER |
|
126 | 126 | deltaheadersize = deltaheader.size |
|
127 | 127 | version = b'01' |
|
128 | 128 | _grouplistcount = 1 # One list of files after the manifests |
|
129 | 129 | |
|
130 | 130 | def __init__(self, fh, alg, extras=None): |
|
131 | 131 | if alg is None: |
|
132 | 132 | alg = b'UN' |
|
133 | 133 | if alg not in util.compengines.supportedbundletypes: |
|
134 | 134 | raise error.Abort(_(b'unknown stream compression type: %s') % alg) |
|
135 | 135 | if alg == b'BZ': |
|
136 | 136 | alg = b'_truncatedBZ' |
|
137 | 137 | |
|
138 | 138 | compengine = util.compengines.forbundletype(alg) |
|
139 | 139 | self._stream = compengine.decompressorreader(fh) |
|
140 | 140 | self._type = alg |
|
141 | 141 | self.extras = extras or {} |
|
142 | 142 | self.callback = None |
|
143 | 143 | |
|
144 | 144 | # These methods (compressed, read, seek, tell) all appear to only |
|
145 | 145 | # be used by bundlerepo, but it's a little hard to tell. |
|
146 | 146 | def compressed(self): |
|
147 | 147 | return self._type is not None and self._type != b'UN' |
|
148 | 148 | |
|
149 | 149 | def read(self, l): |
|
150 | 150 | return self._stream.read(l) |
|
151 | 151 | |
|
152 | 152 | def seek(self, pos): |
|
153 | 153 | return self._stream.seek(pos) |
|
154 | 154 | |
|
155 | 155 | def tell(self): |
|
156 | 156 | return self._stream.tell() |
|
157 | 157 | |
|
158 | 158 | def close(self): |
|
159 | 159 | return self._stream.close() |
|
160 | 160 | |
|
161 | 161 | def _chunklength(self): |
|
162 | 162 | d = readexactly(self._stream, 4) |
|
163 | 163 | l = struct.unpack(b">l", d)[0] |
|
164 | 164 | if l <= 4: |
|
165 | 165 | if l: |
|
166 | 166 | raise error.Abort(_(b"invalid chunk length %d") % l) |
|
167 | 167 | return 0 |
|
168 | 168 | if self.callback: |
|
169 | 169 | self.callback() |
|
170 | 170 | return l - 4 |
|
171 | 171 | |
|
172 | 172 | def changelogheader(self): |
|
173 | 173 | """v10 does not have a changelog header chunk""" |
|
174 | 174 | return {} |
|
175 | 175 | |
|
176 | 176 | def manifestheader(self): |
|
177 | 177 | """v10 does not have a manifest header chunk""" |
|
178 | 178 | return {} |
|
179 | 179 | |
|
180 | 180 | def filelogheader(self): |
|
181 | 181 | """return the header of the filelogs chunk, v10 only has the filename""" |
|
182 | 182 | l = self._chunklength() |
|
183 | 183 | if not l: |
|
184 | 184 | return {} |
|
185 | 185 | fname = readexactly(self._stream, l) |
|
186 | 186 | return {b'filename': fname} |
|
187 | 187 | |
|
188 | 188 | def _deltaheader(self, headertuple, prevnode): |
|
189 | 189 | node, p1, p2, cs = headertuple |
|
190 | 190 | if prevnode is None: |
|
191 | 191 | deltabase = p1 |
|
192 | 192 | else: |
|
193 | 193 | deltabase = prevnode |
|
194 | 194 | flags = 0 |
|
195 | 195 | return node, p1, p2, deltabase, cs, flags |
|
196 | 196 | |
|
197 | 197 | def deltachunk(self, prevnode): |
|
198 | 198 | l = self._chunklength() |
|
199 | 199 | if not l: |
|
200 | 200 | return {} |
|
201 | 201 | headerdata = readexactly(self._stream, self.deltaheadersize) |
|
202 | 202 | header = self.deltaheader.unpack(headerdata) |
|
203 | 203 | delta = readexactly(self._stream, l - self.deltaheadersize) |
|
204 | 204 | node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) |
|
205 | 205 | return (node, p1, p2, cs, deltabase, delta, flags) |
|
206 | 206 | |
|
207 | 207 | def getchunks(self): |
|
208 | 208 | """returns all the chunks contained in the bundle |

209 | 209 | |

210 | 210 | Used when you need to forward the binary stream to a file or another |

211 | 211 | network API. To do so, it parses the changegroup data; otherwise it would |

212 | 212 | block in the sshrepo case because it doesn't know where the stream ends. |
|
213 | 213 | """ |
|
214 | 214 | # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog, |
|
215 | 215 | # and a list of filelogs. For changegroup 3, we expect 4 parts: |
|
216 | 216 | # changelog, manifestlog, a list of tree manifestlogs, and a list of |
|
217 | 217 | # filelogs. |
|
218 | 218 | # |
|
219 | 219 | # Changelog and manifestlog parts are terminated with empty chunks. The |
|
220 | 220 | # tree and file parts are a list of entry sections. Each entry section |
|
221 | 221 | # is a series of chunks terminating in an empty chunk. The list of these |
|
222 | 222 | # entry sections is terminated in yet another empty chunk, so we know |
|
223 | 223 | # we've reached the end of the tree/file list when we reach an empty |
|
224 | 224 | # chunk that was preceded by no non-empty chunks. |
|
225 | 225 | |
|
226 | 226 | parts = 0 |
|
227 | 227 | while parts < 2 + self._grouplistcount: |
|
228 | 228 | noentries = True |
|
229 | 229 | while True: |
|
230 | 230 | chunk = getchunk(self) |
|
231 | 231 | if not chunk: |
|
232 | 232 | # The first two empty chunks represent the end of the |
|
233 | 233 | # changelog and the manifestlog portions. The remaining |
|
234 | 234 | # empty chunks represent either A) the end of individual |
|
235 | 235 | # tree or file entries in the file list, or B) the end of |
|
236 | 236 | # the entire list. It's the end of the entire list if there |
|
237 | 237 | # were no entries (i.e. noentries is True). |
|
238 | 238 | if parts < 2: |
|
239 | 239 | parts += 1 |
|
240 | 240 | elif noentries: |
|
241 | 241 | parts += 1 |
|
242 | 242 | break |
|
243 | 243 | noentries = False |
|
244 | 244 | yield chunkheader(len(chunk)) |
|
245 | 245 | pos = 0 |
|
246 | 246 | while pos < len(chunk): |
|
247 | 247 | next = pos + 2 ** 20 |
|
248 | 248 | yield chunk[pos:next] |
|
249 | 249 | pos = next |
|
250 | 250 | yield closechunk() |
|
251 | 251 | |
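A flattened emulation of getchunks()'s terminator counting above (hypothetical payloads, with b'' standing for an empty chunk): the changelog and manifest groups each consume one terminator, and the trailing list ends when an empty chunk follows another empty chunk:

    stream = [b'cl1', b'', b'mf1', b'', b'file-a', b'f1', b'', b'']
    parts, noentries = 0, True
    for chunk in stream:
        if not chunk:
            if parts < 2 or noentries:
                parts += 1
            noentries = True  # a fresh section starts after each terminator
        else:
            noentries = False
    print(parts)  # 3 == changelog + manifests + the single cg1/cg2 file list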
|
252 | 252 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
253 | 253 | self.callback = prog.increment |
|
254 | 254 | # no need to check for empty manifest group here: |
|
255 | 255 | # if the result of the merge of 1 and 2 is the same in 3 and 4, |
|
256 | 256 | # no new manifest will be created and the manifest group will |
|
257 | 257 | # be empty during the pull |
|
258 | 258 | self.manifestheader() |
|
259 | 259 | deltas = self.deltaiter() |
|
260 | 260 | repo.manifestlog.getstorage(b'').addgroup(deltas, revmap, trp) |
|
261 | 261 | prog.complete() |
|
262 | 262 | self.callback = None |
|
263 | 263 | |
|
264 | 264 | def apply( |
|
265 | 265 | self, |
|
266 | 266 | repo, |
|
267 | 267 | tr, |
|
268 | 268 | srctype, |
|
269 | 269 | url, |
|
270 | 270 | targetphase=phases.draft, |
|
271 | 271 | expectedtotal=None, |
|
272 | 272 | ): |
|
273 | 273 | """Add the changegroup returned by source.read() to this repo. |
|
274 | 274 | srctype is a string like 'push', 'pull', or 'unbundle'. url is |
|
275 | 275 | the URL of the repo where this changegroup is coming from. |
|
276 | 276 | |
|
277 | 277 | Return an integer summarizing the change to this repo: |
|
278 | 278 | - nothing changed or no source: 0 |
|
279 | 279 | - more heads than before: 1+added heads (2..n) |
|
280 | 280 | - fewer heads than before: -1-removed heads (-2..-n) |
|
281 | 281 | - number of heads stays the same: 1 |
|
282 | 282 | """ |
|
283 | 283 | repo = repo.unfiltered() |
|
284 | 284 | |
|
285 | 285 | def csmap(x): |
|
286 | 286 | repo.ui.debug(b"add changeset %s\n" % short(x)) |
|
287 | 287 | return len(cl) |
|
288 | 288 | |
|
289 | 289 | def revmap(x): |
|
290 | 290 | return cl.rev(x) |
|
291 | 291 | |
|
292 | 292 | try: |
|
293 | 293 | # The transaction may already carry source information. In this |
|
294 | 294 | # case we use the top level data. We overwrite the argument |
|
295 | 295 | # because we need to use the top level value (if it exists) |
|
296 | 296 | # in this function. |
|
297 | 297 | srctype = tr.hookargs.setdefault(b'source', srctype) |
|
298 | 298 | tr.hookargs.setdefault(b'url', url) |
|
299 | 299 | repo.hook( |
|
300 | 300 | b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs) |
|
301 | 301 | ) |
|
302 | 302 | |
|
303 | 303 | # write changelog data to temp files so concurrent readers |
|
304 | 304 | # will not see an inconsistent view |
|
305 | 305 | cl = repo.changelog |
|
306 | 306 | cl.delayupdate(tr) |
|
307 | 307 | oldheads = set(cl.heads()) |
|
308 | 308 | |
|
309 | 309 | trp = weakref.proxy(tr) |
|
310 | 310 | # pull off the changeset group |
|
311 | 311 | repo.ui.status(_(b"adding changesets\n")) |
|
312 | 312 | clstart = len(cl) |
|
313 | 313 | progress = repo.ui.makeprogress( |
|
314 | 314 | _(b'changesets'), unit=_(b'chunks'), total=expectedtotal |
|
315 | 315 | ) |
|
316 | 316 | self.callback = progress.increment |
|
317 | 317 | |
|
318 | 318 | efilesset = set() |
|
319 | 319 | cgnodes = [] |
|
320 | 320 | |
|
321 | 321 | def ondupchangelog(cl, node): |
|
322 | 322 | if cl.rev(node) < clstart: |
|
323 | 323 | cgnodes.append(node) |
|
324 | 324 | |
|
325 | 325 | def onchangelog(cl, node): |
|
326 | 326 | efilesset.update(cl.readfiles(node)) |
|
327 | 327 | |
|
328 | 328 | self.changelogheader() |
|
329 | 329 | deltas = self.deltaiter() |
|
330 | 330 | if not cl.addgroup( |
|
331 | 331 | deltas, |
|
332 | 332 | csmap, |
|
333 | 333 | trp, |
|
334 | 334 | addrevisioncb=onchangelog, |
|
335 | 335 | duplicaterevisioncb=ondupchangelog, |
|
336 | 336 | ): |
|
337 | 337 | repo.ui.develwarn( |
|
338 | 338 | b'applied empty changelog from changegroup', |
|
339 | 339 | config=b'warn-empty-changegroup', |
|
340 | 340 | ) |
|
341 | 341 | efiles = len(efilesset) |
|
342 | 342 | clend = len(cl) |
|
343 | 343 | changesets = clend - clstart |
|
344 | 344 | progress.complete() |
|
345 | 345 | del deltas |
|
346 | 346 | # TODO Python 2.7 removal |
|
347 | 347 | # del efilesset |
|
348 | 348 | efilesset = None |
|
349 | 349 | self.callback = None |
|
350 | 350 | |
|
351 | 351 | # pull off the manifest group |
|
352 | 352 | repo.ui.status(_(b"adding manifests\n")) |
|
353 | 353 | # We know that we'll never have more manifests than we had |
|
354 | 354 | # changesets. |
|
355 | 355 | progress = repo.ui.makeprogress( |
|
356 | 356 | _(b'manifests'), unit=_(b'chunks'), total=changesets |
|
357 | 357 | ) |
|
358 | 358 | self._unpackmanifests(repo, revmap, trp, progress) |
|
359 | 359 | |
|
360 | 360 | needfiles = {} |
|
361 | 361 | if repo.ui.configbool(b'server', b'validate'): |
|
362 | 362 | cl = repo.changelog |
|
363 | 363 | ml = repo.manifestlog |
|
364 | 364 | # validate incoming csets have their manifests |
|
365 | 365 | for cset in pycompat.xrange(clstart, clend): |
|
366 | 366 | mfnode = cl.changelogrevision(cset).manifest |
|
367 | 367 | mfest = ml[mfnode].readdelta() |
|
368 | 368 | # store file nodes we must see |
|
369 | 369 | for f, n in pycompat.iteritems(mfest): |
|
370 | 370 | needfiles.setdefault(f, set()).add(n) |
|
371 | 371 | |
|
372 | 372 | # process the files |
|
373 | 373 | repo.ui.status(_(b"adding file changes\n")) |
|
374 | 374 | newrevs, newfiles = _addchangegroupfiles( |
|
375 | 375 | repo, self, revmap, trp, efiles, needfiles |
|
376 | 376 | ) |
|
377 | 377 | |
|
378 | 378 | # making sure the value exists |
|
379 | 379 | tr.changes.setdefault(b'changegroup-count-changesets', 0) |
|
380 | 380 | tr.changes.setdefault(b'changegroup-count-revisions', 0) |
|
381 | 381 | tr.changes.setdefault(b'changegroup-count-files', 0) |
|
382 | 382 | tr.changes.setdefault(b'changegroup-count-heads', 0) |
|
383 | 383 | |
|
384 | 384 | # Some code uses bundle operations for internal purposes. It usually |

385 | 385 | # sets `ui.quiet` to do so outside of the user's sight. Since the report |

386 | 386 | # of such operations now happens at the end of the transaction, |

387 | 387 | # ui.quiet has no direct effect on the output. |

388 | 388 | # |

389 | 389 | # To preserve that intent we use an inelegant hack: we fail to report |

390 | 390 | # the change if `quiet` is set. We should probably move to |

391 | 391 | # something better, but this is a good first step to allow the "end |

392 | 392 | # of transaction report" to pass tests. |
|
393 | 393 | if not repo.ui.quiet: |
|
394 | 394 | tr.changes[b'changegroup-count-changesets'] += changesets |
|
395 | 395 | tr.changes[b'changegroup-count-revisions'] += newrevs |
|
396 | 396 | tr.changes[b'changegroup-count-files'] += newfiles |
|
397 | 397 | |
|
398 | 398 | deltaheads = 0 |
|
399 | 399 | if oldheads: |
|
400 | 400 | heads = cl.heads() |
|
401 | 401 | deltaheads += len(heads) - len(oldheads) |
|
402 | 402 | for h in heads: |
|
403 | 403 | if h not in oldheads and repo[h].closesbranch(): |
|
404 | 404 | deltaheads -= 1 |
|
405 | 405 | |
|
406 | 406 | # see previous comment about checking ui.quiet |
|
407 | 407 | if not repo.ui.quiet: |
|
408 | 408 | tr.changes[b'changegroup-count-heads'] += deltaheads |
|
409 | 409 | repo.invalidatevolatilesets() |
|
410 | 410 | |
|
411 | 411 | if changesets > 0: |
|
412 | 412 | if b'node' not in tr.hookargs: |
|
413 | 413 | tr.hookargs[b'node'] = hex(cl.node(clstart)) |
|
414 | 414 | tr.hookargs[b'node_last'] = hex(cl.node(clend - 1)) |
|
415 | 415 | hookargs = dict(tr.hookargs) |
|
416 | 416 | else: |
|
417 | 417 | hookargs = dict(tr.hookargs) |
|
418 | 418 | hookargs[b'node'] = hex(cl.node(clstart)) |
|
419 | 419 | hookargs[b'node_last'] = hex(cl.node(clend - 1)) |
|
420 | 420 | repo.hook( |
|
421 | 421 | b'pretxnchangegroup', |
|
422 | 422 | throw=True, |
|
423 | 423 | **pycompat.strkwargs(hookargs) |
|
424 | 424 | ) |
|
425 | 425 | |
|
426 | 426 | added = pycompat.xrange(clstart, clend) |
|
427 | 427 | phaseall = None |
|
428 | 428 | if srctype in (b'push', b'serve'): |
|
429 | 429 | # Old servers can not push the boundary themselves. |
|
430 | 430 | # New servers won't push the boundary if changeset already |
|
431 | 431 | # exists locally as secret |
|
432 | 432 | # |
|
433 | 433 | # We should not use `added` here but the list of all changes in |
|
434 | 434 | # the bundle |
|
435 | 435 | if repo.publishing(): |
|
436 | 436 | targetphase = phaseall = phases.public |
|
437 | 437 | else: |
|
438 | 438 | # closer target phase computation |
|
439 | 439 | |
|
440 | 440 | # Those changesets have been pushed from the |
|
441 | 441 | # outside, their phases are going to be pushed |
|
442 | 442 | # alongside. Therefore `targetphase` is |
|
443 | 443 | # ignored. |
|
444 | 444 | targetphase = phaseall = phases.draft |
|
445 | 445 | if added: |
|
446 | | phases.registernew(repo, tr, targetphase, |

| 446 | phases.registernew(repo, tr, targetphase, added) |
|
447 | 447 | if phaseall is not None: |
|
448 | 448 | phases.advanceboundary(repo, tr, phaseall, cgnodes, revs=added) |
|
449 | 449 | cgnodes = [] |
|
450 | 450 | |
|
451 | 451 | if changesets > 0: |
|
452 | 452 | |
|
453 | 453 | def runhooks(unused_success): |
|
454 | 454 | # These hooks run when the lock releases, not when the |
|
455 | 455 | # transaction closes. So it's possible for the changelog |
|
456 | 456 | # to have changed since we last saw it. |
|
457 | 457 | if clstart >= len(repo): |
|
458 | 458 | return |
|
459 | 459 | |
|
460 | 460 | repo.hook(b"changegroup", **pycompat.strkwargs(hookargs)) |
|
461 | 461 | |
|
462 | 462 | for rev in added: |
|
463 | 463 | args = hookargs.copy() |
|
464 | 464 | args[b'node'] = hex(cl.node(rev)) |
|
465 | 465 | del args[b'node_last'] |
|
466 | 466 | repo.hook(b"incoming", **pycompat.strkwargs(args)) |
|
467 | 467 | |
|
468 | 468 | newheads = [h for h in repo.heads() if h not in oldheads] |
|
469 | 469 | repo.ui.log( |
|
470 | 470 | b"incoming", |
|
471 | 471 | b"%d incoming changes - new heads: %s\n", |
|
472 | 472 | len(added), |
|
473 | 473 | b', '.join([hex(c[:6]) for c in newheads]), |
|
474 | 474 | ) |
|
475 | 475 | |
|
476 | 476 | tr.addpostclose( |
|
477 | 477 | b'changegroup-runhooks-%020i' % clstart, |
|
478 | 478 | lambda tr: repo._afterlock(runhooks), |
|
479 | 479 | ) |
|
480 | 480 | finally: |
|
481 | 481 | repo.ui.flush() |
|
482 | 482 | # never return 0 here: |
|
483 | 483 | if deltaheads < 0: |
|
484 | 484 | ret = deltaheads - 1 |
|
485 | 485 | else: |
|
486 | 486 | ret = deltaheads + 1 |
|
487 | 487 | return ret |
|
488 | 488 | |
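A hypothetical helper (not part of this module) decoding the return convention documented in apply()'s docstring:

    def describe(ret):
        if ret == 0:
            return 'nothing changed'
        if ret == 1:
            return 'head count unchanged'
        if ret > 1:
            return '%d head(s) added' % (ret - 1)
        return '%d head(s) removed' % (-1 - ret)

    print(describe(3), '/', describe(-2))  # 2 head(s) added / 1 head(s) removed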
|
489 | 489 | def deltaiter(self): |
|
490 | 490 | """ |
|
491 | 491 | returns an iterator of the deltas in this changegroup |
|
492 | 492 | |
|
493 | 493 | Useful for passing to the underlying storage system to be stored. |
|
494 | 494 | """ |
|
495 | 495 | chain = None |
|
496 | 496 | for chunkdata in iter(lambda: self.deltachunk(chain), {}): |
|
497 | 497 | # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags) |
|
498 | 498 | yield chunkdata |
|
499 | 499 | chain = chunkdata[0] |
|
500 | 500 | |
|
501 | 501 | |
|
502 | 502 | class cg2unpacker(cg1unpacker): |
|
503 | 503 | """Unpacker for cg2 streams. |
|
504 | 504 | |
|
505 | 505 | cg2 streams add support for generaldelta, so the delta header |
|
506 | 506 | format is slightly different. All other features about the data |
|
507 | 507 | remain the same. |
|
508 | 508 | """ |
|
509 | 509 | |
|
510 | 510 | deltaheader = _CHANGEGROUPV2_DELTA_HEADER |
|
511 | 511 | deltaheadersize = deltaheader.size |
|
512 | 512 | version = b'02' |
|
513 | 513 | |
|
514 | 514 | def _deltaheader(self, headertuple, prevnode): |
|
515 | 515 | node, p1, p2, deltabase, cs = headertuple |
|
516 | 516 | flags = 0 |
|
517 | 517 | return node, p1, p2, deltabase, cs, flags |
|
518 | 518 | |
|
519 | 519 | |
|
520 | 520 | class cg3unpacker(cg2unpacker): |
|
521 | 521 | """Unpacker for cg3 streams. |
|
522 | 522 | |
|
523 | 523 | cg3 streams add support for exchanging treemanifests and revlog |
|
524 | 524 | flags. It adds the revlog flags to the delta header and an empty chunk |
|
525 | 525 | separating manifests and files. |
|
526 | 526 | """ |
|
527 | 527 | |
|
528 | 528 | deltaheader = _CHANGEGROUPV3_DELTA_HEADER |
|
529 | 529 | deltaheadersize = deltaheader.size |
|
530 | 530 | version = b'03' |
|
531 | 531 | _grouplistcount = 2 # One list of manifests and one list of files |
|
532 | 532 | |
|
533 | 533 | def _deltaheader(self, headertuple, prevnode): |
|
534 | 534 | node, p1, p2, deltabase, cs, flags = headertuple |
|
535 | 535 | return node, p1, p2, deltabase, cs, flags |
|
536 | 536 | |
|
537 | 537 | def _unpackmanifests(self, repo, revmap, trp, prog): |
|
538 | 538 | super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog) |
|
539 | 539 | for chunkdata in iter(self.filelogheader, {}): |
|
540 | 540 | # If we get here, there are directory manifests in the changegroup |
|
541 | 541 | d = chunkdata[b"filename"] |
|
542 | 542 | repo.ui.debug(b"adding %s revisions\n" % d) |
|
543 | 543 | deltas = self.deltaiter() |
|
544 | 544 | if not repo.manifestlog.getstorage(d).addgroup(deltas, revmap, trp): |
|
545 | 545 | raise error.Abort(_(b"received dir revlog group is empty")) |
|
546 | 546 | |
|
547 | 547 | |
|
548 | 548 | class headerlessfixup(object): |
|
549 | 549 | def __init__(self, fh, h): |
|
550 | 550 | self._h = h |
|
551 | 551 | self._fh = fh |
|
552 | 552 | |
|
553 | 553 | def read(self, n): |
|
554 | 554 | if self._h: |
|
555 | 555 | d, self._h = self._h[:n], self._h[n:] |
|
556 | 556 | if len(d) < n: |
|
557 | 557 | d += readexactly(self._fh, n - len(d)) |
|
558 | 558 | return d |
|
559 | 559 | return readexactly(self._fh, n) |
|
560 | 560 | |
|
561 | 561 | |
|
562 | 562 | def _revisiondeltatochunks(delta, headerfn): |
|
563 | 563 | """Serialize a revisiondelta to changegroup chunks.""" |
|
564 | 564 | |
|
565 | 565 | # The captured revision delta may be encoded as a delta against |
|
566 | 566 | # a base revision or as a full revision. The changegroup format |
|
567 | 567 | # requires that everything on the wire be deltas. So for full |
|
568 | 568 | # revisions, we need to invent a header that says to rewrite |
|
569 | 569 | # data. |
|
570 | 570 | |
|
571 | 571 | if delta.delta is not None: |
|
572 | 572 | prefix, data = b'', delta.delta |
|
573 | 573 | elif delta.basenode == nullid: |
|
574 | 574 | data = delta.revision |
|
575 | 575 | prefix = mdiff.trivialdiffheader(len(data)) |
|
576 | 576 | else: |
|
577 | 577 | data = delta.revision |
|
578 | 578 | prefix = mdiff.replacediffheader(delta.baserevisionsize, len(data)) |
|
579 | 579 | |
|
580 | 580 | meta = headerfn(delta) |
|
581 | 581 | |
|
582 | 582 | yield chunkheader(len(meta) + len(prefix) + len(data)) |
|
583 | 583 | yield meta |
|
584 | 584 | if prefix: |
|
585 | 585 | yield prefix |
|
586 | 586 | yield data |
|
587 | 587 | |
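A minimal sketch of the "full revision as delta" trick described in the comments above: a delta hunk header is three big-endian 32-bit integers (start, end, new length), so prefixing the full text with (0, len(base), len(text)) produces a delta that replaces the entire base, mirroring what mdiff.trivialdiffheader and mdiff.replacediffheader arrange for:

    import struct

    def fullrevisionasdelta(base, text):
        # Replace bytes [0, len(base)) of the base with the new full text.
        return struct.pack(">lll", 0, len(base), len(text)) + text

    print(fullrevisionasdelta(b'old contents', b'new full revision'))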
|
588 | 588 | |
|
589 | 589 | def _sortnodesellipsis(store, nodes, cl, lookup): |
|
590 | 590 | """Sort nodes for changegroup generation.""" |
|
591 | 591 | # Ellipses serving mode. |
|
592 | 592 | # |
|
593 | 593 | # In a perfect world, we'd generate better ellipsis-ified graphs |
|
594 | 594 | # for non-changelog revlogs. In practice, we haven't started doing |
|
595 | 595 | # that yet, so the resulting DAGs for the manifestlog and filelogs |
|
596 | 596 | # are actually full of bogus parentage on all the ellipsis |
|
597 | 597 | # nodes. This has the side effect that, while the contents are |
|
598 | 598 | # correct, the individual DAGs might be completely out of whack in |
|
599 | 599 | # a case like 882681bc3166 and its ancestors (back about 10 |
|
600 | 600 | # revisions or so) in the main hg repo. |
|
601 | 601 | # |
|
602 | 602 | # The one invariant we *know* holds is that the new (potentially |
|
603 | 603 | # bogus) DAG shape will be valid if we order the nodes in the |
|
604 | 604 | # order that they're introduced in dramatis personae by the |
|
605 | 605 | # changelog, so what we do is we sort the non-changelog histories |
|
606 | 606 | # by the order in which they are used by the changelog. |
|
607 | 607 | key = lambda n: cl.rev(lookup(n)) |
|
608 | 608 | return sorted(nodes, key=key) |
|
609 | 609 | |
|
610 | 610 | |
|
611 | 611 | def _resolvenarrowrevisioninfo( |
|
612 | 612 | cl, |
|
613 | 613 | store, |
|
614 | 614 | ischangelog, |
|
615 | 615 | rev, |
|
616 | 616 | linkrev, |
|
617 | 617 | linknode, |
|
618 | 618 | clrevtolocalrev, |
|
619 | 619 | fullclnodes, |
|
620 | 620 | precomputedellipsis, |
|
621 | 621 | ): |
|
622 | 622 | linkparents = precomputedellipsis[linkrev] |
|
623 | 623 | |
|
624 | 624 | def local(clrev): |
|
625 | 625 | """Turn a changelog revnum into a local revnum. |
|
626 | 626 | |
|
627 | 627 | The ellipsis dag is stored as revnums on the changelog, |
|
628 | 628 | but when we're producing ellipsis entries for |
|
629 | 629 | non-changelog revlogs, we need to turn those numbers into |
|
630 | 630 | something local. This does that for us, and during the |
|
631 | 631 | changelog sending phase will also expand the stored |
|
632 | 632 | mappings as needed. |
|
633 | 633 | """ |
|
634 | 634 | if clrev == nullrev: |
|
635 | 635 | return nullrev |
|
636 | 636 | |
|
637 | 637 | if ischangelog: |
|
638 | 638 | return clrev |
|
639 | 639 | |
|
640 | 640 | # Walk the ellipsis-ized changelog breadth-first looking for a |
|
641 | 641 | # change that has been linked from the current revlog. |
|
642 | 642 | # |
|
643 | 643 | # For a flat manifest revlog only a single step should be necessary |
|
644 | 644 | # as all relevant changelog entries are relevant to the flat |
|
645 | 645 | # manifest. |
|
646 | 646 | # |
|
647 | 647 | # For a filelog or tree manifest dirlog however not every changelog |
|
648 | 648 | # entry will have been relevant, so we need to skip some changelog |
|
649 | 649 | # nodes even after ellipsis-izing. |
|
650 | 650 | walk = [clrev] |
|
651 | 651 | while walk: |
|
652 | 652 | p = walk[0] |
|
653 | 653 | walk = walk[1:] |
|
654 | 654 | if p in clrevtolocalrev: |
|
655 | 655 | return clrevtolocalrev[p] |
|
656 | 656 | elif p in fullclnodes: |
|
657 | 657 | walk.extend([pp for pp in cl.parentrevs(p) if pp != nullrev]) |
|
658 | 658 | elif p in precomputedellipsis: |
|
659 | 659 | walk.extend( |
|
660 | 660 | [pp for pp in precomputedellipsis[p] if pp != nullrev] |
|
661 | 661 | ) |
|
662 | 662 | else: |
|
663 | 663 | # In this case, we've got an ellipsis with parents |
|
664 | 664 | # outside the current bundle (likely an |
|
665 | 665 | # incremental pull). We "know" that we can use the |
|
666 | 666 | # value of this same revlog at whatever revision |
|
667 | 667 | # is pointed to by linknode. "Know" is in scare |
|
668 | 668 | # quotes because I haven't done enough examination |
|
669 | 669 | # of edge cases to convince myself this is really |
|
670 | 670 | # a fact - it works for all the (admittedly |
|
671 | 671 | # thorough) cases in our testsuite, but I would be |
|
672 | 672 | # somewhat unsurprised to find a case in the wild |
|
673 | 673 | # where this breaks down a bit. That said, I don't |
|
674 | 674 | # know if it would hurt anything. |
|
675 | 675 | for i in pycompat.xrange(rev, 0, -1): |
|
676 | 676 | if store.linkrev(i) == clrev: |
|
677 | 677 | return i |
|
678 | 678 | # We failed to resolve a parent for this node, so |
|
679 | 679 | # we crash the changegroup construction. |
|
680 | 680 | raise error.Abort( |
|
681 | 681 | b'unable to resolve parent while packing %r %r' |
|
682 | 682 | b' for changeset %r' % (store.indexfile, rev, clrev) |
|
683 | 683 | ) |
|
684 | 684 | |
|
685 | 685 | return nullrev |
|
686 | 686 | |
|
687 | 687 | if not linkparents or (store.parentrevs(rev) == (nullrev, nullrev)): |
|
688 | 688 | p1, p2 = nullrev, nullrev |
|
689 | 689 | elif len(linkparents) == 1: |
|
690 | 690 | (p1,) = sorted(local(p) for p in linkparents) |
|
691 | 691 | p2 = nullrev |
|
692 | 692 | else: |
|
693 | 693 | p1, p2 = sorted(local(p) for p in linkparents) |
|
694 | 694 | |
|
695 | 695 | p1node, p2node = store.node(p1), store.node(p2) |
|
696 | 696 | |
|
697 | 697 | return p1node, p2node, linknode |
|
698 | 698 | |
|
699 | 699 | |
|
700 | 700 | def deltagroup( |
|
701 | 701 | repo, |
|
702 | 702 | store, |
|
703 | 703 | nodes, |
|
704 | 704 | ischangelog, |
|
705 | 705 | lookup, |
|
706 | 706 | forcedeltaparentprev, |
|
707 | 707 | topic=None, |
|
708 | 708 | ellipses=False, |
|
709 | 709 | clrevtolocalrev=None, |
|
710 | 710 | fullclnodes=None, |
|
711 | 711 | precomputedellipsis=None, |
|
712 | 712 | ): |
|
713 | 713 | """Calculate deltas for a set of revisions. |
|
714 | 714 | |
|
715 | 715 | Is a generator of ``revisiondelta`` instances. |
|
716 | 716 | |
|
717 | 717 | If topic is not None, progress detail will be generated using this |
|
718 | 718 | topic name (e.g. changesets, manifests, etc). |
|
719 | 719 | """ |
|
720 | 720 | if not nodes: |
|
721 | 721 | return |
|
722 | 722 | |
|
723 | 723 | cl = repo.changelog |
|
724 | 724 | |
|
725 | 725 | if ischangelog: |
|
726 | 726 | # `hg log` shows changesets in storage order. To preserve order |
|
727 | 727 | # across clones, send out changesets in storage order. |
|
728 | 728 | nodesorder = b'storage' |
|
729 | 729 | elif ellipses: |
|
730 | 730 | nodes = _sortnodesellipsis(store, nodes, cl, lookup) |
|
731 | 731 | nodesorder = b'nodes' |
|
732 | 732 | else: |
|
733 | 733 | nodesorder = None |
|
734 | 734 | |
|
735 | 735 | # Perform ellipses filtering and revision massaging. We do this before |
|
736 | 736 | # emitrevisions() because a) filtering out revisions creates less work |
|
737 | 737 | # for emitrevisions() b) dropping revisions would break emitrevisions()'s |
|
738 | 738 | # assumptions about delta choices and we would possibly send a delta |
|
739 | 739 | # referencing a missing base revision. |
|
740 | 740 | # |
|
741 | 741 | # Also, calling lookup() has side-effects with regards to populating |
|
742 | 742 | # data structures. If we don't call lookup() for each node or if we call |
|
743 | 743 | # lookup() after the first pass through each node, things can break - |
|
744 | 744 | # possibly intermittently depending on the python hash seed! For that |
|
745 | 745 | # reason, we store a mapping of all linknodes during the initial node |
|
746 | 746 | # pass rather than use lookup() on the output side. |
|
747 | 747 | if ellipses: |
|
748 | 748 | filtered = [] |
|
749 | 749 | adjustedparents = {} |
|
750 | 750 | linknodes = {} |
|
751 | 751 | |
|
752 | 752 | for node in nodes: |
|
753 | 753 | rev = store.rev(node) |
|
754 | 754 | linknode = lookup(node) |
|
755 | 755 | linkrev = cl.rev(linknode) |
|
756 | 756 | clrevtolocalrev[linkrev] = rev |
|
757 | 757 | |
|
758 | 758 | # If linknode is in fullclnodes, it means the corresponding |
|
759 | 759 | # changeset was a full changeset and is being sent unaltered. |
|
760 | 760 | if linknode in fullclnodes: |
|
761 | 761 | linknodes[node] = linknode |
|
762 | 762 | |
|
763 | 763 | # If the corresponding changeset wasn't in the set computed |
|
764 | 764 | # as relevant to us, it should be dropped outright. |
|
765 | 765 | elif linkrev not in precomputedellipsis: |
|
766 | 766 | continue |
|
767 | 767 | |
|
768 | 768 | else: |
|
769 | 769 | # We could probably do this later and avoid the dict |
|
770 | 770 | # holding state. But it likely doesn't matter. |
|
771 | 771 | p1node, p2node, linknode = _resolvenarrowrevisioninfo( |
|
772 | 772 | cl, |
|
773 | 773 | store, |
|
774 | 774 | ischangelog, |
|
775 | 775 | rev, |
|
776 | 776 | linkrev, |
|
777 | 777 | linknode, |
|
778 | 778 | clrevtolocalrev, |
|
779 | 779 | fullclnodes, |
|
780 | 780 | precomputedellipsis, |
|
781 | 781 | ) |
|
782 | 782 | |
|
783 | 783 | adjustedparents[node] = (p1node, p2node) |
|
784 | 784 | linknodes[node] = linknode |
|
785 | 785 | |
|
786 | 786 | filtered.append(node) |
|
787 | 787 | |
|
788 | 788 | nodes = filtered |
|
789 | 789 | |
|
790 | 790 | # We expect the first pass to be fast, so we only engage the progress |
|
791 | 791 | # meter for constructing the revision deltas. |
|
792 | 792 | progress = None |
|
793 | 793 | if topic is not None: |
|
794 | 794 | progress = repo.ui.makeprogress( |
|
795 | 795 | topic, unit=_(b'chunks'), total=len(nodes) |
|
796 | 796 | ) |
|
797 | 797 | |
|
798 | 798 | configtarget = repo.ui.config(b'devel', b'bundle.delta') |
|
799 | 799 | if configtarget not in (b'', b'p1', b'full'): |
|
800 | 800 | msg = _("""config "devel.bundle.delta" has unknown value: %s""")
|
801 | 801 | repo.ui.warn(msg % configtarget) |
|
802 | 802 | |
|
803 | 803 | deltamode = repository.CG_DELTAMODE_STD |
|
804 | 804 | if forcedeltaparentprev: |
|
805 | 805 | deltamode = repository.CG_DELTAMODE_PREV |
|
806 | 806 | elif configtarget == b'p1': |
|
807 | 807 | deltamode = repository.CG_DELTAMODE_P1 |
|
808 | 808 | elif configtarget == b'full': |
|
809 | 809 | deltamode = repository.CG_DELTAMODE_FULL |
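
For experiments, the `devel.bundle.delta` knob can be set in-process as well as in an hgrc; a minimal sketch (assuming a local repository in the current directory):

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    # equivalent to: [devel] bundle.delta = full  in an hgrc
    ui.setconfig(b'devel', b'bundle.delta', b'full')
    repo = hg.repository(ui, b'.')
    # changegroups generated from this repo now request CG_DELTAMODE_FULL
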
|
810 | 810 | |
|
811 | 811 | revisions = store.emitrevisions( |
|
812 | 812 | nodes, |
|
813 | 813 | nodesorder=nodesorder, |
|
814 | 814 | revisiondata=True, |
|
815 | 815 | assumehaveparentrevisions=not ellipses, |
|
816 | 816 | deltamode=deltamode, |
|
817 | 817 | ) |
|
818 | 818 | |
|
819 | 819 | for i, revision in enumerate(revisions): |
|
820 | 820 | if progress: |
|
821 | 821 | progress.update(i + 1) |
|
822 | 822 | |
|
823 | 823 | if ellipses: |
|
824 | 824 | linknode = linknodes[revision.node] |
|
825 | 825 | |
|
826 | 826 | if revision.node in adjustedparents: |
|
827 | 827 | p1node, p2node = adjustedparents[revision.node] |
|
828 | 828 | revision.p1node = p1node |
|
829 | 829 | revision.p2node = p2node |
|
830 | 830 | revision.flags |= repository.REVISION_FLAG_ELLIPSIS |
|
831 | 831 | |
|
832 | 832 | else: |
|
833 | 833 | linknode = lookup(revision.node) |
|
834 | 834 | |
|
835 | 835 | revision.linknode = linknode |
|
836 | 836 | yield revision |
|
837 | 837 | |
|
838 | 838 | if progress: |
|
839 | 839 | progress.complete() |
|
840 | 840 | |
|
841 | 841 | |
|
842 | 842 | class cgpacker(object): |
|
843 | 843 | def __init__( |
|
844 | 844 | self, |
|
845 | 845 | repo, |
|
846 | 846 | oldmatcher, |
|
847 | 847 | matcher, |
|
848 | 848 | version, |
|
849 | 849 | builddeltaheader, |
|
850 | 850 | manifestsend, |
|
851 | 851 | forcedeltaparentprev=False, |
|
852 | 852 | bundlecaps=None, |
|
853 | 853 | ellipses=False, |
|
854 | 854 | shallow=False, |
|
855 | 855 | ellipsisroots=None, |
|
856 | 856 | fullnodes=None, |
|
857 | 857 | ): |
|
858 | 858 | """Given a source repo, construct a bundler. |
|
859 | 859 | |
|
860 | 860 | oldmatcher is a matcher that matches on files the client already has. |
|
861 | 861 | These will not be included in the changegroup. |
|
862 | 862 | |
|
863 | 863 | matcher is a matcher that matches on files to include in the |
|
864 | 864 | changegroup. Used to facilitate sparse changegroups. |
|
865 | 865 | |
|
866 | 866 | forcedeltaparentprev indicates whether delta parents must be against |
|
867 | 867 | the previous revision in a delta group. This should only be used for |
|
868 | 868 | compatibility with changegroup version 1. |
|
869 | 869 | |
|
870 | 870 | builddeltaheader is a callable that constructs the header for a group |
|
871 | 871 | delta. |
|
872 | 872 | |
|
873 | 873 | manifestsend is a chunk to send after manifests have been fully emitted. |
|
874 | 874 | |
|
875 | 875 | ellipses indicates whether ellipsis serving mode is enabled. |
|
876 | 876 | |
|
877 | 877 | bundlecaps is optional and can be used to specify the set of |
|
878 | 878 | capabilities which can be used to build the bundle. While bundlecaps is |
|
879 | 879 | unused in core Mercurial, extensions rely on this feature to communicate |
|
880 | 880 | capabilities to customize the changegroup packer. |
|
881 | 881 | |
|
882 | 882 | shallow indicates whether shallow data might be sent. The packer may |
|
883 | 883 | need to pack file contents not introduced by the changes being packed. |
|
884 | 884 | |
|
885 | 885 | fullnodes is the set of changelog nodes which should not be ellipsis |
|
886 | 886 | nodes. We store this rather than the set of nodes that should be |
|
887 | 887 | ellipsis because for very large histories we expect this to be |
|
888 | 888 | significantly smaller. |
|
889 | 889 | """ |
|
890 | 890 | assert oldmatcher |
|
891 | 891 | assert matcher |
|
892 | 892 | self._oldmatcher = oldmatcher |
|
893 | 893 | self._matcher = matcher |
|
894 | 894 | |
|
895 | 895 | self.version = version |
|
896 | 896 | self._forcedeltaparentprev = forcedeltaparentprev |
|
897 | 897 | self._builddeltaheader = builddeltaheader |
|
898 | 898 | self._manifestsend = manifestsend |
|
899 | 899 | self._ellipses = ellipses |
|
900 | 900 | |
|
901 | 901 | # Set of capabilities we can use to build the bundle. |
|
902 | 902 | if bundlecaps is None: |
|
903 | 903 | bundlecaps = set() |
|
904 | 904 | self._bundlecaps = bundlecaps |
|
905 | 905 | self._isshallow = shallow |
|
906 | 906 | self._fullclnodes = fullnodes |
|
907 | 907 | |
|
908 | 908 | # Maps ellipsis revs to their roots at the changelog level. |
|
909 | 909 | self._precomputedellipsis = ellipsisroots |
|
910 | 910 | |
|
911 | 911 | self._repo = repo |
|
912 | 912 | |
|
913 | 913 | if self._repo.ui.verbose and not self._repo.ui.debugflag: |
|
914 | 914 | self._verbosenote = self._repo.ui.note |
|
915 | 915 | else: |
|
916 | 916 | self._verbosenote = lambda s: None |
|
917 | 917 | |
|
918 | 918 | def generate( |
|
919 | 919 | self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True |
|
920 | 920 | ): |
|
921 | 921 | """Yield a sequence of changegroup byte chunks. |
|
922 | 922 | If changelog is False, changelog data won't be added to the changegroup
|
923 | 923 | """ |
|
924 | 924 | |
|
925 | 925 | repo = self._repo |
|
926 | 926 | cl = repo.changelog |
|
927 | 927 | |
|
928 | 928 | self._verbosenote(_(b'uncompressed size of bundle content:\n')) |
|
929 | 929 | size = 0 |
|
930 | 930 | |
|
931 | 931 | clstate, deltas = self._generatechangelog( |
|
932 | 932 | cl, clnodes, generate=changelog |
|
933 | 933 | ) |
|
934 | 934 | for delta in deltas: |
|
935 | 935 | for chunk in _revisiondeltatochunks(delta, self._builddeltaheader): |
|
936 | 936 | size += len(chunk) |
|
937 | 937 | yield chunk |
|
938 | 938 | |
|
939 | 939 | close = closechunk() |
|
940 | 940 | size += len(close) |
|
941 | 941 | yield closechunk() |
|
942 | 942 | |
|
943 | 943 | self._verbosenote(_(b'%8.i (changelog)\n') % size) |
|
944 | 944 | |
|
945 | 945 | clrevorder = clstate[b'clrevorder'] |
|
946 | 946 | manifests = clstate[b'manifests'] |
|
947 | 947 | changedfiles = clstate[b'changedfiles'] |
|
948 | 948 | |
|
949 | 949 | # We need to make sure that the linkrev in the changegroup refers to |
|
950 | 950 | # the first changeset that introduced the manifest or file revision. |
|
951 | 951 | # The fastpath is usually safer than the slowpath, because the filelogs |
|
952 | 952 | # are walked in revlog order. |
|
953 | 953 | # |
|
954 | 954 | # When taking the slowpath, if the manifest revlog uses generaldelta,
|
955 | 955 | # the manifest may be walked in the "wrong" order. Without 'clrevorder', |
|
956 | 956 | # we would get an incorrect linkrev (see fix in cc0ff93d0c0c). |
|
957 | 957 | # |
|
958 | 958 | # When taking the fastpath, we are only vulnerable to reordering |
|
959 | 959 | # of the changelog itself. The changelog never uses generaldelta and is |
|
960 | 960 | # never reordered. To handle this case, we simply take the slowpath, |
|
961 | 961 | # which already has the 'clrevorder' logic. This was also fixed in |
|
962 | 962 | # cc0ff93d0c0c. |
|
963 | 963 | |
|
964 | 964 | # Treemanifests don't work correctly with fastpathlinkrev |
|
965 | 965 | # either, because we don't discover which directory nodes to |
|
966 | 966 | # send along with files. This could probably be fixed. |
|
967 | 967 | fastpathlinkrev = fastpathlinkrev and not scmutil.istreemanifest(repo) |
|
968 | 968 | |
|
969 | 969 | fnodes = {} # needed file nodes |
|
970 | 970 | |
|
971 | 971 | size = 0 |
|
972 | 972 | it = self.generatemanifests( |
|
973 | 973 | commonrevs, |
|
974 | 974 | clrevorder, |
|
975 | 975 | fastpathlinkrev, |
|
976 | 976 | manifests, |
|
977 | 977 | fnodes, |
|
978 | 978 | source, |
|
979 | 979 | clstate[b'clrevtomanifestrev'], |
|
980 | 980 | ) |
|
981 | 981 | |
|
982 | 982 | for tree, deltas in it: |
|
983 | 983 | if tree: |
|
984 | 984 | assert self.version == b'03' |
|
985 | 985 | chunk = _fileheader(tree) |
|
986 | 986 | size += len(chunk) |
|
987 | 987 | yield chunk |
|
988 | 988 | |
|
989 | 989 | for delta in deltas: |
|
990 | 990 | chunks = _revisiondeltatochunks(delta, self._builddeltaheader) |
|
991 | 991 | for chunk in chunks: |
|
992 | 992 | size += len(chunk) |
|
993 | 993 | yield chunk |
|
994 | 994 | |
|
995 | 995 | close = closechunk() |
|
996 | 996 | size += len(close) |
|
997 | 997 | yield close |
|
998 | 998 | |
|
999 | 999 | self._verbosenote(_(b'%8.i (manifests)\n') % size) |
|
1000 | 1000 | yield self._manifestsend |
|
1001 | 1001 | |
|
1002 | 1002 | mfdicts = None |
|
1003 | 1003 | if self._ellipses and self._isshallow: |
|
1004 | 1004 | mfdicts = [ |
|
1005 | 1005 | (self._repo.manifestlog[n].read(), lr) |
|
1006 | 1006 | for (n, lr) in pycompat.iteritems(manifests) |
|
1007 | 1007 | ] |
|
1008 | 1008 | |
|
1009 | 1009 | manifests.clear() |
|
1010 | 1010 | clrevs = {cl.rev(x) for x in clnodes} |
|
1011 | 1011 | |
|
1012 | 1012 | it = self.generatefiles( |
|
1013 | 1013 | changedfiles, |
|
1014 | 1014 | commonrevs, |
|
1015 | 1015 | source, |
|
1016 | 1016 | mfdicts, |
|
1017 | 1017 | fastpathlinkrev, |
|
1018 | 1018 | fnodes, |
|
1019 | 1019 | clrevs, |
|
1020 | 1020 | ) |
|
1021 | 1021 | |
|
1022 | 1022 | for path, deltas in it: |
|
1023 | 1023 | h = _fileheader(path) |
|
1024 | 1024 | size = len(h) |
|
1025 | 1025 | yield h |
|
1026 | 1026 | |
|
1027 | 1027 | for delta in deltas: |
|
1028 | 1028 | chunks = _revisiondeltatochunks(delta, self._builddeltaheader) |
|
1029 | 1029 | for chunk in chunks: |
|
1030 | 1030 | size += len(chunk) |
|
1031 | 1031 | yield chunk |
|
1032 | 1032 | |
|
1033 | 1033 | close = closechunk() |
|
1034 | 1034 | size += len(close) |
|
1035 | 1035 | yield close |
|
1036 | 1036 | |
|
1037 | 1037 | self._verbosenote(_(b'%8.i %s\n') % (size, path)) |
|
1038 | 1038 | |
|
1039 | 1039 | yield closechunk() |
|
1040 | 1040 | |
|
1041 | 1041 | if clnodes: |
|
1042 | 1042 | repo.hook(b'outgoing', node=hex(clnodes[0]), source=source) |
|
1043 | 1043 | |
|
1044 | 1044 | def _generatechangelog(self, cl, nodes, generate=True): |
|
1045 | 1045 | """Generate data for changelog chunks. |
|
1046 | 1046 | |
|
1047 | 1047 | Returns a 2-tuple of a dict containing state and an iterable of |
|
1048 | 1048 | byte chunks. The state will not be fully populated until the |
|
1049 | 1049 | chunk stream has been fully consumed (see the sketch after this function).
|
1050 | 1050 | |
|
1051 | 1051 | if generate is False, the state will be fully populated and no chunk |
|
1052 | 1052 | stream will be yielded |
|
1053 | 1053 | """ |
|
1054 | 1054 | clrevorder = {} |
|
1055 | 1055 | manifests = {} |
|
1056 | 1056 | mfl = self._repo.manifestlog |
|
1057 | 1057 | changedfiles = set() |
|
1058 | 1058 | clrevtomanifestrev = {} |
|
1059 | 1059 | |
|
1060 | 1060 | state = { |
|
1061 | 1061 | b'clrevorder': clrevorder, |
|
1062 | 1062 | b'manifests': manifests, |
|
1063 | 1063 | b'changedfiles': changedfiles, |
|
1064 | 1064 | b'clrevtomanifestrev': clrevtomanifestrev, |
|
1065 | 1065 | } |
|
1066 | 1066 | |
|
1067 | 1067 | if not (generate or self._ellipses): |
|
1068 | 1068 | # sort the nodes in storage order |
|
1069 | 1069 | nodes = sorted(nodes, key=cl.rev) |
|
1070 | 1070 | for node in nodes: |
|
1071 | 1071 | c = cl.changelogrevision(node) |
|
1072 | 1072 | clrevorder[node] = len(clrevorder) |
|
1073 | 1073 | # record the first changeset introducing this manifest version |
|
1074 | 1074 | manifests.setdefault(c.manifest, node) |
|
1075 | 1075 | # Record a complete list of potentially-changed files in |
|
1076 | 1076 | # this manifest. |
|
1077 | 1077 | changedfiles.update(c.files) |
|
1078 | 1078 | |
|
1079 | 1079 | return state, () |
|
1080 | 1080 | |
|
1081 | 1081 | # Callback for the changelog, used to collect changed files and |
|
1082 | 1082 | # manifest nodes. |
|
1083 | 1083 | # Returns the linkrev node (identity in the changelog case). |
|
1084 | 1084 | def lookupcl(x): |
|
1085 | 1085 | c = cl.changelogrevision(x) |
|
1086 | 1086 | clrevorder[x] = len(clrevorder) |
|
1087 | 1087 | |
|
1088 | 1088 | if self._ellipses: |
|
1089 | 1089 | # Only update manifests if x is going to be sent. Otherwise we |
|
1090 | 1090 | # end up with bogus linkrevs specified for manifests and |
|
1091 | 1091 | # we skip some manifest nodes that we should otherwise |
|
1092 | 1092 | # have sent. |
|
1093 | 1093 | if ( |
|
1094 | 1094 | x in self._fullclnodes |
|
1095 | 1095 | or cl.rev(x) in self._precomputedellipsis |
|
1096 | 1096 | ): |
|
1097 | 1097 | |
|
1098 | 1098 | manifestnode = c.manifest |
|
1099 | 1099 | # Record the first changeset introducing this manifest |
|
1100 | 1100 | # version. |
|
1101 | 1101 | manifests.setdefault(manifestnode, x) |
|
1102 | 1102 | # Set this narrow-specific dict so we have the lowest |
|
1103 | 1103 | # manifest revnum to look up for this cl revnum. (Part of |
|
1104 | 1104 | # mapping changelog ellipsis parents to manifest ellipsis |
|
1105 | 1105 | # parents) |
|
1106 | 1106 | clrevtomanifestrev.setdefault( |
|
1107 | 1107 | cl.rev(x), mfl.rev(manifestnode) |
|
1108 | 1108 | ) |
|
1109 | 1109 | # We can't trust the changed files list in the changeset if the |
|
1110 | 1110 | # client requested a shallow clone. |
|
1111 | 1111 | if self._isshallow: |
|
1112 | 1112 | changedfiles.update(mfl[c.manifest].read().keys()) |
|
1113 | 1113 | else: |
|
1114 | 1114 | changedfiles.update(c.files) |
|
1115 | 1115 | else: |
|
1116 | 1116 | # record the first changeset introducing this manifest version |
|
1117 | 1117 | manifests.setdefault(c.manifest, x) |
|
1118 | 1118 | # Record a complete list of potentially-changed files in |
|
1119 | 1119 | # this manifest. |
|
1120 | 1120 | changedfiles.update(c.files) |
|
1121 | 1121 | |
|
1122 | 1122 | return x |
|
1123 | 1123 | |
|
1124 | 1124 | gen = deltagroup( |
|
1125 | 1125 | self._repo, |
|
1126 | 1126 | cl, |
|
1127 | 1127 | nodes, |
|
1128 | 1128 | True, |
|
1129 | 1129 | lookupcl, |
|
1130 | 1130 | self._forcedeltaparentprev, |
|
1131 | 1131 | ellipses=self._ellipses, |
|
1132 | 1132 | topic=_(b'changesets'), |
|
1133 | 1133 | clrevtolocalrev={}, |
|
1134 | 1134 | fullclnodes=self._fullclnodes, |
|
1135 | 1135 | precomputedellipsis=self._precomputedellipsis, |
|
1136 | 1136 | ) |
|
1137 | 1137 | |
|
1138 | 1138 | return state, gen |
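
A sketch of the consumption contract documented above (hypothetical caller; `packer`, `cl` and `clnodes` assumed in scope): the state dict is filled in by `lookupcl` as the generator runs, so it must be drained first.

    state, deltas = packer._generatechangelog(cl, clnodes)
    for delta in deltas:
        pass  # normally each delta is converted to chunks and emitted
    # only now are these complete:
    manifests = state[b'manifests']
    changedfiles = state[b'changedfiles']
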
|
1139 | 1139 | |
|
1140 | 1140 | def generatemanifests( |
|
1141 | 1141 | self, |
|
1142 | 1142 | commonrevs, |
|
1143 | 1143 | clrevorder, |
|
1144 | 1144 | fastpathlinkrev, |
|
1145 | 1145 | manifests, |
|
1146 | 1146 | fnodes, |
|
1147 | 1147 | source, |
|
1148 | 1148 | clrevtolocalrev, |
|
1149 | 1149 | ): |
|
1150 | 1150 | """Returns an iterator of changegroup chunks containing manifests. |
|
1151 | 1151 | |
|
1152 | 1152 | `source` is unused here, but is used by extensions like remotefilelog to |
|
1153 | 1153 | change what is sent based on pulls vs pushes, etc.
|
1154 | 1154 | """ |
|
1155 | 1155 | repo = self._repo |
|
1156 | 1156 | mfl = repo.manifestlog |
|
1157 | 1157 | tmfnodes = {b'': manifests} |
|
1158 | 1158 | |
|
1159 | 1159 | # Callback for the manifest, used to collect linkrevs for filelog |
|
1160 | 1160 | # revisions. |
|
1161 | 1161 | # Returns the linkrev node (collected in lookupcl). |
|
1162 | 1162 | def makelookupmflinknode(tree, nodes): |
|
1163 | 1163 | if fastpathlinkrev: |
|
1164 | 1164 | assert not tree |
|
1165 | 1165 | return ( |
|
1166 | 1166 | manifests.__getitem__ |
|
1167 | 1167 | ) # pytype: disable=unsupported-operands |
|
1168 | 1168 | |
|
1169 | 1169 | def lookupmflinknode(x): |
|
1170 | 1170 | """Callback for looking up the linknode for manifests. |
|
1171 | 1171 | |
|
1172 | 1172 | Returns the linkrev node for the specified manifest. |
|
1173 | 1173 | |
|
1174 | 1174 | SIDE EFFECT: |
|
1175 | 1175 | |
|
1176 | 1176 | 1) fclnodes gets populated with the list of relevant |
|
1177 | 1177 | file nodes if we're not using fastpathlinkrev |
|
1178 | 1178 | 2) When treemanifests are in use, collects treemanifest nodes |
|
1179 | 1179 | to send |
|
1180 | 1180 | |
|
1181 | 1181 | Note that this means manifests must be completely sent to |
|
1182 | 1182 | the client before you can trust the list of files and |
|
1183 | 1183 | treemanifests to send. |
|
1184 | 1184 | """ |
|
1185 | 1185 | clnode = nodes[x] |
|
1186 | 1186 | mdata = mfl.get(tree, x).readfast(shallow=True) |
|
1187 | 1187 | for p, n, fl in mdata.iterentries(): |
|
1188 | 1188 | if fl == b't': # subdirectory manifest |
|
1189 | 1189 | subtree = tree + p + b'/' |
|
1190 | 1190 | tmfclnodes = tmfnodes.setdefault(subtree, {}) |
|
1191 | 1191 | tmfclnode = tmfclnodes.setdefault(n, clnode) |
|
1192 | 1192 | if clrevorder[clnode] < clrevorder[tmfclnode]: |
|
1193 | 1193 | tmfclnodes[n] = clnode |
|
1194 | 1194 | else: |
|
1195 | 1195 | f = tree + p |
|
1196 | 1196 | fclnodes = fnodes.setdefault(f, {}) |
|
1197 | 1197 | fclnode = fclnodes.setdefault(n, clnode) |
|
1198 | 1198 | if clrevorder[clnode] < clrevorder[fclnode]: |
|
1199 | 1199 | fclnodes[n] = clnode |
|
1200 | 1200 | return clnode |
|
1201 | 1201 | |
|
1202 | 1202 | return lookupmflinknode |
|
1203 | 1203 | |
|
1204 | 1204 | while tmfnodes: |
|
1205 | 1205 | tree, nodes = tmfnodes.popitem() |
|
1206 | 1206 | |
|
1207 | 1207 | should_visit = self._matcher.visitdir(tree[:-1]) |
|
1208 | 1208 | if tree and not should_visit: |
|
1209 | 1209 | continue |
|
1210 | 1210 | |
|
1211 | 1211 | store = mfl.getstorage(tree) |
|
1212 | 1212 | |
|
1213 | 1213 | if not should_visit: |
|
1214 | 1214 | # No nodes to send because this directory is out of |
|
1215 | 1215 | # the client's view of the repository (probably |
|
1216 | 1216 | # because of narrow clones). Do this even for the root |
|
1217 | 1217 | # directory (tree=='') |
|
1218 | 1218 | prunednodes = [] |
|
1219 | 1219 | else: |
|
1220 | 1220 | # Avoid sending any manifest nodes we can prove the |
|
1221 | 1221 | # client already has by checking linkrevs. See the |
|
1222 | 1222 | # related comment in generatefiles(). |
|
1223 | 1223 | prunednodes = self._prunemanifests(store, nodes, commonrevs) |
|
1224 | 1224 | |
|
1225 | 1225 | if tree and not prunednodes: |
|
1226 | 1226 | continue |
|
1227 | 1227 | |
|
1228 | 1228 | lookupfn = makelookupmflinknode(tree, nodes) |
|
1229 | 1229 | |
|
1230 | 1230 | deltas = deltagroup( |
|
1231 | 1231 | self._repo, |
|
1232 | 1232 | store, |
|
1233 | 1233 | prunednodes, |
|
1234 | 1234 | False, |
|
1235 | 1235 | lookupfn, |
|
1236 | 1236 | self._forcedeltaparentprev, |
|
1237 | 1237 | ellipses=self._ellipses, |
|
1238 | 1238 | topic=_(b'manifests'), |
|
1239 | 1239 | clrevtolocalrev=clrevtolocalrev, |
|
1240 | 1240 | fullclnodes=self._fullclnodes, |
|
1241 | 1241 | precomputedellipsis=self._precomputedellipsis, |
|
1242 | 1242 | ) |
|
1243 | 1243 | |
|
1244 | 1244 | if not self._oldmatcher.visitdir(store.tree[:-1]): |
|
1245 | 1245 | yield tree, deltas |
|
1246 | 1246 | else: |
|
1247 | 1247 | # 'deltas' is a generator and we need to consume it even if |
|
1248 | 1248 | # we are not going to send it because a side-effect is that |
|
1249 | 1249 | # it updates tmfnodes (via lookupfn)
|
1250 | 1250 | for d in deltas: |
|
1251 | 1251 | pass |
|
1252 | 1252 | if not tree: |
|
1253 | 1253 | yield tree, [] |
|
1254 | 1254 | |
|
1255 | 1255 | def _prunemanifests(self, store, nodes, commonrevs): |
|
1256 | 1256 | if not self._ellipses: |
|
1257 | 1257 | # In non-ellipses case and large repositories, it is better to |
|
1258 | 1258 | # prevent calling of store.rev and store.linkrev on a lot of |
|
1259 | 1259 | # nodes as compared to sending some extra data |
|
1260 | 1260 | return nodes.copy() |
|
1261 | 1261 | # This is split out as a separate method to allow filtering |
|
1262 | 1262 | # commonrevs in extension code. |
|
1263 | 1263 | # |
|
1264 | 1264 | # TODO(augie): this shouldn't be required, instead we should |
|
1265 | 1265 | # make filtering of revisions to send delegated to the store |
|
1266 | 1266 | # layer. |
|
1267 | 1267 | frev, flr = store.rev, store.linkrev |
|
1268 | 1268 | return [n for n in nodes if flr(frev(n)) not in commonrevs] |
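
A toy view of that linkrev filter (stand-in data): manifest nodes whose linking changelog revision the client already has are dropped, everything else is kept.

    # linkrev of each manifest node, and changelog revs the client has
    linkrev_of = {b'm1': 3, b'm2': 7, b'm3': 9}
    commonrevs = {3, 7}
    nodes = [b'm1', b'm2', b'm3']
    assert [n for n in nodes if linkrev_of[n] not in commonrevs] == [b'm3']
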
|
1269 | 1269 | |
|
1270 | 1270 | # The 'source' parameter is useful for extensions |
|
1271 | 1271 | def generatefiles( |
|
1272 | 1272 | self, |
|
1273 | 1273 | changedfiles, |
|
1274 | 1274 | commonrevs, |
|
1275 | 1275 | source, |
|
1276 | 1276 | mfdicts, |
|
1277 | 1277 | fastpathlinkrev, |
|
1278 | 1278 | fnodes, |
|
1279 | 1279 | clrevs, |
|
1280 | 1280 | ): |
|
1281 | 1281 | changedfiles = [ |
|
1282 | 1282 | f |
|
1283 | 1283 | for f in changedfiles |
|
1284 | 1284 | if self._matcher(f) and not self._oldmatcher(f) |
|
1285 | 1285 | ] |
|
1286 | 1286 | |
|
1287 | 1287 | if not fastpathlinkrev: |
|
1288 | 1288 | |
|
1289 | 1289 | def normallinknodes(unused, fname): |
|
1290 | 1290 | return fnodes.get(fname, {}) |
|
1291 | 1291 | |
|
1292 | 1292 | else: |
|
1293 | 1293 | cln = self._repo.changelog.node |
|
1294 | 1294 | |
|
1295 | 1295 | def normallinknodes(store, fname): |
|
1296 | 1296 | flinkrev = store.linkrev |
|
1297 | 1297 | fnode = store.node |
|
1298 | 1298 | revs = ((r, flinkrev(r)) for r in store) |
|
1299 | 1299 | return {fnode(r): cln(lr) for r, lr in revs if lr in clrevs} |
|
1300 | 1300 | |
|
1301 | 1301 | clrevtolocalrev = {} |
|
1302 | 1302 | |
|
1303 | 1303 | if self._isshallow: |
|
1304 | 1304 | # In a shallow clone, the linknodes callback needs to also include |
|
1305 | 1305 | # those file nodes that are in the manifests we sent but weren't |
|
1306 | 1306 | # introduced by those manifests. |
|
1307 | 1307 | commonctxs = [self._repo[c] for c in commonrevs] |
|
1308 | 1308 | clrev = self._repo.changelog.rev |
|
1309 | 1309 | |
|
1310 | 1310 | def linknodes(flog, fname): |
|
1311 | 1311 | for c in commonctxs: |
|
1312 | 1312 | try: |
|
1313 | 1313 | fnode = c.filenode(fname) |
|
1314 | 1314 | clrevtolocalrev[c.rev()] = flog.rev(fnode) |
|
1315 | 1315 | except error.ManifestLookupError: |
|
1316 | 1316 | pass |
|
1317 | 1317 | links = normallinknodes(flog, fname) |
|
1318 | 1318 | if len(links) != len(mfdicts): |
|
1319 | 1319 | for mf, lr in mfdicts: |
|
1320 | 1320 | fnode = mf.get(fname, None) |
|
1321 | 1321 | if fnode in links: |
|
1322 | 1322 | links[fnode] = min(links[fnode], lr, key=clrev) |
|
1323 | 1323 | elif fnode: |
|
1324 | 1324 | links[fnode] = lr |
|
1325 | 1325 | return links |
|
1326 | 1326 | |
|
1327 | 1327 | else: |
|
1328 | 1328 | linknodes = normallinknodes |
|
1329 | 1329 | |
|
1330 | 1330 | repo = self._repo |
|
1331 | 1331 | progress = repo.ui.makeprogress( |
|
1332 | 1332 | _(b'files'), unit=_(b'files'), total=len(changedfiles) |
|
1333 | 1333 | ) |
|
1334 | 1334 | for i, fname in enumerate(sorted(changedfiles)): |
|
1335 | 1335 | filerevlog = repo.file(fname) |
|
1336 | 1336 | if not filerevlog: |
|
1337 | 1337 | raise error.Abort( |
|
1338 | 1338 | _(b"empty or missing file data for %s") % fname |
|
1339 | 1339 | ) |
|
1340 | 1340 | |
|
1341 | 1341 | clrevtolocalrev.clear() |
|
1342 | 1342 | |
|
1343 | 1343 | linkrevnodes = linknodes(filerevlog, fname) |
|
1344 | 1344 | # Lookup for filenodes, we collected the linkrev nodes above in the |
|
1345 | 1345 | # fastpath case and with lookupmf in the slowpath case. |
|
1346 | 1346 | def lookupfilelog(x): |
|
1347 | 1347 | return linkrevnodes[x] |
|
1348 | 1348 | |
|
1349 | 1349 | frev, flr = filerevlog.rev, filerevlog.linkrev |
|
1350 | 1350 | # Skip sending any filenode we know the client already |
|
1351 | 1351 | # has. This avoids over-sending files relatively |
|
1352 | 1352 | # inexpensively, so it's not a problem if we under-filter |
|
1353 | 1353 | # here. |
|
1354 | 1354 | filenodes = [ |
|
1355 | 1355 | n for n in linkrevnodes if flr(frev(n)) not in commonrevs |
|
1356 | 1356 | ] |
|
1357 | 1357 | |
|
1358 | 1358 | if not filenodes: |
|
1359 | 1359 | continue |
|
1360 | 1360 | |
|
1361 | 1361 | progress.update(i + 1, item=fname) |
|
1362 | 1362 | |
|
1363 | 1363 | deltas = deltagroup( |
|
1364 | 1364 | self._repo, |
|
1365 | 1365 | filerevlog, |
|
1366 | 1366 | filenodes, |
|
1367 | 1367 | False, |
|
1368 | 1368 | lookupfilelog, |
|
1369 | 1369 | self._forcedeltaparentprev, |
|
1370 | 1370 | ellipses=self._ellipses, |
|
1371 | 1371 | clrevtolocalrev=clrevtolocalrev, |
|
1372 | 1372 | fullclnodes=self._fullclnodes, |
|
1373 | 1373 | precomputedellipsis=self._precomputedellipsis, |
|
1374 | 1374 | ) |
|
1375 | 1375 | |
|
1376 | 1376 | yield fname, deltas |
|
1377 | 1377 | |
|
1378 | 1378 | progress.complete() |
|
1379 | 1379 | |
|
1380 | 1380 | |
|
1381 | 1381 | def _makecg1packer( |
|
1382 | 1382 | repo, |
|
1383 | 1383 | oldmatcher, |
|
1384 | 1384 | matcher, |
|
1385 | 1385 | bundlecaps, |
|
1386 | 1386 | ellipses=False, |
|
1387 | 1387 | shallow=False, |
|
1388 | 1388 | ellipsisroots=None, |
|
1389 | 1389 | fullnodes=None, |
|
1390 | 1390 | ): |
|
1391 | 1391 | builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack( |
|
1392 | 1392 | d.node, d.p1node, d.p2node, d.linknode |
|
1393 | 1393 | ) |
|
1394 | 1394 | |
|
1395 | 1395 | return cgpacker( |
|
1396 | 1396 | repo, |
|
1397 | 1397 | oldmatcher, |
|
1398 | 1398 | matcher, |
|
1399 | 1399 | b'01', |
|
1400 | 1400 | builddeltaheader=builddeltaheader, |
|
1401 | 1401 | manifestsend=b'', |
|
1402 | 1402 | forcedeltaparentprev=True, |
|
1403 | 1403 | bundlecaps=bundlecaps, |
|
1404 | 1404 | ellipses=ellipses, |
|
1405 | 1405 | shallow=shallow, |
|
1406 | 1406 | ellipsisroots=ellipsisroots, |
|
1407 | 1407 | fullnodes=fullnodes, |
|
1408 | 1408 | ) |
|
1409 | 1409 | |
|
1410 | 1410 | |
|
1411 | 1411 | def _makecg2packer( |
|
1412 | 1412 | repo, |
|
1413 | 1413 | oldmatcher, |
|
1414 | 1414 | matcher, |
|
1415 | 1415 | bundlecaps, |
|
1416 | 1416 | ellipses=False, |
|
1417 | 1417 | shallow=False, |
|
1418 | 1418 | ellipsisroots=None, |
|
1419 | 1419 | fullnodes=None, |
|
1420 | 1420 | ): |
|
1421 | 1421 | builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack( |
|
1422 | 1422 | d.node, d.p1node, d.p2node, d.basenode, d.linknode |
|
1423 | 1423 | ) |
|
1424 | 1424 | |
|
1425 | 1425 | return cgpacker( |
|
1426 | 1426 | repo, |
|
1427 | 1427 | oldmatcher, |
|
1428 | 1428 | matcher, |
|
1429 | 1429 | b'02', |
|
1430 | 1430 | builddeltaheader=builddeltaheader, |
|
1431 | 1431 | manifestsend=b'', |
|
1432 | 1432 | bundlecaps=bundlecaps, |
|
1433 | 1433 | ellipses=ellipses, |
|
1434 | 1434 | shallow=shallow, |
|
1435 | 1435 | ellipsisroots=ellipsisroots, |
|
1436 | 1436 | fullnodes=fullnodes, |
|
1437 | 1437 | ) |
|
1438 | 1438 | |
|
1439 | 1439 | |
|
1440 | 1440 | def _makecg3packer( |
|
1441 | 1441 | repo, |
|
1442 | 1442 | oldmatcher, |
|
1443 | 1443 | matcher, |
|
1444 | 1444 | bundlecaps, |
|
1445 | 1445 | ellipses=False, |
|
1446 | 1446 | shallow=False, |
|
1447 | 1447 | ellipsisroots=None, |
|
1448 | 1448 | fullnodes=None, |
|
1449 | 1449 | ): |
|
1450 | 1450 | builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack( |
|
1451 | 1451 | d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags |
|
1452 | 1452 | ) |
|
1453 | 1453 | |
|
1454 | 1454 | return cgpacker( |
|
1455 | 1455 | repo, |
|
1456 | 1456 | oldmatcher, |
|
1457 | 1457 | matcher, |
|
1458 | 1458 | b'03', |
|
1459 | 1459 | builddeltaheader=builddeltaheader, |
|
1460 | 1460 | manifestsend=closechunk(), |
|
1461 | 1461 | bundlecaps=bundlecaps, |
|
1462 | 1462 | ellipses=ellipses, |
|
1463 | 1463 | shallow=shallow, |
|
1464 | 1464 | ellipsisroots=ellipsisroots, |
|
1465 | 1465 | fullnodes=fullnodes, |
|
1466 | 1466 | ) |
|
1467 | 1467 | |
|
1468 | 1468 | |
|
1469 | 1469 | _packermap = { |
|
1470 | 1470 | b'01': (_makecg1packer, cg1unpacker), |
|
1471 | 1471 | # cg2 adds support for exchanging generaldelta |
|
1472 | 1472 | b'02': (_makecg2packer, cg2unpacker), |
|
1473 | 1473 | # cg3 adds support for exchanging revlog flags and treemanifests |
|
1474 | 1474 | b'03': (_makecg3packer, cg3unpacker), |
|
1475 | 1475 | } |
|
1476 | 1476 | |
|
1477 | 1477 | |
|
1478 | 1478 | def allsupportedversions(repo): |
|
1479 | 1479 | versions = set(_packermap.keys()) |
|
1480 | 1480 | needv03 = False |
|
1481 | 1481 | if ( |
|
1482 | 1482 | repo.ui.configbool(b'experimental', b'changegroup3') |
|
1483 | 1483 | or repo.ui.configbool(b'experimental', b'treemanifest') |
|
1484 | 1484 | or scmutil.istreemanifest(repo) |
|
1485 | 1485 | ): |
|
1486 | 1486 | # we keep version 03 because we need it to exchange treemanifest data
|
1487 | 1487 | # |
|
1488 | 1488 | # we also keep versions 01 and 02, because it is possible for a repo to
|
1489 | 1489 | # contain both normal and tree manifests at the same time, so using an
|
1490 | 1490 | # older version to pull data is viable |
|
1491 | 1491 | # |
|
1492 | 1492 | # (or even to push a subset of history)
|
1493 | 1493 | needv03 = True |
|
1494 | 1494 | if b'exp-sidedata-flag' in repo.requirements: |
|
1495 | 1495 | needv03 = True |
|
1496 | 1496 | # don't attempt to use 01/02 until we do sidedata cleaning |
|
1497 | 1497 | versions.discard(b'01') |
|
1498 | 1498 | versions.discard(b'02') |
|
1499 | 1499 | if not needv03: |
|
1500 | 1500 | versions.discard(b'03') |
|
1501 | 1501 | return versions |
|
1502 | 1502 | |
|
1503 | 1503 | |
|
1504 | 1504 | # Changegroup versions that can be applied to the repo |
|
1505 | 1505 | def supportedincomingversions(repo): |
|
1506 | 1506 | return allsupportedversions(repo) |
|
1507 | 1507 | |
|
1508 | 1508 | |
|
1509 | 1509 | # Changegroup versions that can be created from the repo |
|
1510 | 1510 | def supportedoutgoingversions(repo): |
|
1511 | 1511 | versions = allsupportedversions(repo) |
|
1512 | 1512 | if scmutil.istreemanifest(repo): |
|
1513 | 1513 | # Versions 01 and 02 support only flat manifests and it's just too |
|
1514 | 1514 | # expensive to convert between the flat manifest and tree manifest on |
|
1515 | 1515 | # the fly. Since tree manifests are hashed differently, all of history |
|
1516 | 1516 | # would have to be converted. Instead, we simply don't even pretend to |
|
1517 | 1517 | # support versions 01 and 02. |
|
1518 | 1518 | versions.discard(b'01') |
|
1519 | 1519 | versions.discard(b'02') |
|
1520 | 1520 | if requirements.NARROW_REQUIREMENT in repo.requirements: |
|
1521 | 1521 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
1522 | 1522 | # support that for stripping and unbundling to work. |
|
1523 | 1523 | versions.discard(b'01') |
|
1524 | 1524 | versions.discard(b'02') |
|
1525 | 1525 | if LFS_REQUIREMENT in repo.requirements: |
|
1526 | 1526 | # Versions 01 and 02 don't support revlog flags, and we need to |
|
1527 | 1527 | # mark LFS entries with REVIDX_EXTSTORED. |
|
1528 | 1528 | versions.discard(b'01') |
|
1529 | 1529 | versions.discard(b'02') |
|
1530 | 1530 | |
|
1531 | 1531 | return versions |
|
1532 | 1532 | |
|
1533 | 1533 | |
|
1534 | 1534 | def localversion(repo): |
|
1535 | 1535 | # Finds the best version to use for bundles that are meant to be used |
|
1536 | 1536 | # locally, such as those from strip and shelve, and temporary bundles. |
|
1537 | 1537 | return max(supportedoutgoingversions(repo)) |
|
1538 | 1538 | |
|
1539 | 1539 | |
|
1540 | 1540 | def safeversion(repo): |
|
1541 | 1541 | # Finds the smallest version that it's safe to assume clients of the repo |
|
1542 | 1542 | # will support. For example, all hg versions that support generaldelta also |
|
1543 | 1543 | # support changegroup 02. |
|
1544 | 1544 | versions = supportedoutgoingversions(repo) |
|
1545 | 1545 | if b'generaldelta' in repo.requirements: |
|
1546 | 1546 | versions.discard(b'01') |
|
1547 | 1547 | assert versions |
|
1548 | 1548 | return min(versions) |
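
A short sketch of how these negotiation helpers compose on a repository object (`repo` assumed to be a local repo):

    from mercurial import changegroup

    versions = changegroup.supportedoutgoingversions(repo)
    best = changegroup.localversion(repo)   # highest supported, e.g. b'03'
    safe = changegroup.safeversion(repo)    # lowest safe, e.g. b'02'
    assert safe in versions and best in versions
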
|
1549 | 1549 | |
|
1550 | 1550 | |
|
1551 | 1551 | def getbundler( |
|
1552 | 1552 | version, |
|
1553 | 1553 | repo, |
|
1554 | 1554 | bundlecaps=None, |
|
1555 | 1555 | oldmatcher=None, |
|
1556 | 1556 | matcher=None, |
|
1557 | 1557 | ellipses=False, |
|
1558 | 1558 | shallow=False, |
|
1559 | 1559 | ellipsisroots=None, |
|
1560 | 1560 | fullnodes=None, |
|
1561 | 1561 | ): |
|
1562 | 1562 | assert version in supportedoutgoingversions(repo) |
|
1563 | 1563 | |
|
1564 | 1564 | if matcher is None: |
|
1565 | 1565 | matcher = matchmod.always() |
|
1566 | 1566 | if oldmatcher is None: |
|
1567 | 1567 | oldmatcher = matchmod.never() |
|
1568 | 1568 | |
|
1569 | 1569 | if version == b'01' and not matcher.always(): |
|
1570 | 1570 | raise error.ProgrammingError( |
|
1571 | 1571 | b'version 01 changegroups do not support sparse file matchers' |
|
1572 | 1572 | ) |
|
1573 | 1573 | |
|
1574 | 1574 | if ellipses and version in (b'01', b'02'): |
|
1575 | 1575 | raise error.Abort( |
|
1576 | 1576 | _( |
|
1577 | 1577 | b'ellipsis nodes require at least cg3 on client and server, ' |
|
1578 | 1578 | b'but negotiated version %s' |
|
1579 | 1579 | ) |
|
1580 | 1580 | % version |
|
1581 | 1581 | ) |
|
1582 | 1582 | |
|
1583 | 1583 | # Requested files could include files not in the local store. So |
|
1584 | 1584 | # filter those out. |
|
1585 | 1585 | matcher = repo.narrowmatch(matcher) |
|
1586 | 1586 | |
|
1587 | 1587 | fn = _packermap[version][0] |
|
1588 | 1588 | return fn( |
|
1589 | 1589 | repo, |
|
1590 | 1590 | oldmatcher, |
|
1591 | 1591 | matcher, |
|
1592 | 1592 | bundlecaps, |
|
1593 | 1593 | ellipses=ellipses, |
|
1594 | 1594 | shallow=shallow, |
|
1595 | 1595 | ellipsisroots=ellipsisroots, |
|
1596 | 1596 | fullnodes=fullnodes, |
|
1597 | 1597 | ) |
|
1598 | 1598 | |
|
1599 | 1599 | |
|
1600 | 1600 | def getunbundler(version, fh, alg, extras=None): |
|
1601 | 1601 | return _packermap[version][1](fh, alg, extras=extras) |
|
1602 | 1602 | |
|
1603 | 1603 | |
|
1604 | 1604 | def _changegroupinfo(repo, nodes, source): |
|
1605 | 1605 | if repo.ui.verbose or source == b'bundle': |
|
1606 | 1606 | repo.ui.status(_(b"%d changesets found\n") % len(nodes)) |
|
1607 | 1607 | if repo.ui.debugflag: |
|
1608 | 1608 | repo.ui.debug(b"list of changesets:\n") |
|
1609 | 1609 | for node in nodes: |
|
1610 | 1610 | repo.ui.debug(b"%s\n" % hex(node)) |
|
1611 | 1611 | |
|
1612 | 1612 | |
|
1613 | 1613 | def makechangegroup( |
|
1614 | 1614 | repo, outgoing, version, source, fastpath=False, bundlecaps=None |
|
1615 | 1615 | ): |
|
1616 | 1616 | cgstream = makestream( |
|
1617 | 1617 | repo, |
|
1618 | 1618 | outgoing, |
|
1619 | 1619 | version, |
|
1620 | 1620 | source, |
|
1621 | 1621 | fastpath=fastpath, |
|
1622 | 1622 | bundlecaps=bundlecaps, |
|
1623 | 1623 | ) |
|
1624 | 1624 | return getunbundler( |
|
1625 | 1625 | version, |
|
1626 | 1626 | util.chunkbuffer(cgstream), |
|
1627 | 1627 | None, |
|
1628 | 1628 | {b'clcount': len(outgoing.missing)}, |
|
1629 | 1629 | ) |
|
1630 | 1630 | |
|
1631 | 1631 | |
|
1632 | 1632 | def makestream( |
|
1633 | 1633 | repo, |
|
1634 | 1634 | outgoing, |
|
1635 | 1635 | version, |
|
1636 | 1636 | source, |
|
1637 | 1637 | fastpath=False, |
|
1638 | 1638 | bundlecaps=None, |
|
1639 | 1639 | matcher=None, |
|
1640 | 1640 | ): |
|
1641 | 1641 | bundler = getbundler(version, repo, bundlecaps=bundlecaps, matcher=matcher) |
|
1642 | 1642 | |
|
1643 | 1643 | repo = repo.unfiltered() |
|
1644 | 1644 | commonrevs = outgoing.common |
|
1645 | 1645 | csets = outgoing.missing |
|
1646 | 1646 | heads = outgoing.ancestorsof |
|
1647 | 1647 | # We go through the fast path if we get told to, or if all (unfiltered)
|
1648 | 1648 | # heads have been requested (since we then know all their linkrevs will
|
1649 | 1649 | # be pulled by the client). |
|
1650 | 1650 | heads.sort() |
|
1651 | 1651 | fastpathlinkrev = fastpath or ( |
|
1652 | 1652 | repo.filtername is None and heads == sorted(repo.heads()) |
|
1653 | 1653 | ) |
|
1654 | 1654 | |
|
1655 | 1655 | repo.hook(b'preoutgoing', throw=True, source=source) |
|
1656 | 1656 | _changegroupinfo(repo, csets, source) |
|
1657 | 1657 | return bundler.generate(commonrevs, csets, fastpathlinkrev, source) |
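
End to end, the helpers above compose like this sketch (assuming `repo` and a precomputed `outgoing` set are in scope; `b'push'` is just an example source label):

    from mercurial import changegroup

    stream = changegroup.makestream(repo, outgoing, b'02', b'push')
    raw = b''.join(stream)  # raw changegroup bytes, e.g. for a bundle file
    # or get an unbundler object back instead of raw bytes:
    cg = changegroup.makechangegroup(repo, outgoing, b'02', b'push')
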
|
1658 | 1658 | |
|
1659 | 1659 | |
|
1660 | 1660 | def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles): |
|
1661 | 1661 | revisions = 0 |
|
1662 | 1662 | files = 0 |
|
1663 | 1663 | progress = repo.ui.makeprogress( |
|
1664 | 1664 | _(b'files'), unit=_(b'files'), total=expectedfiles |
|
1665 | 1665 | ) |
|
1666 | 1666 | for chunkdata in iter(source.filelogheader, {}): |
|
1667 | 1667 | files += 1 |
|
1668 | 1668 | f = chunkdata[b"filename"] |
|
1669 | 1669 | repo.ui.debug(b"adding %s revisions\n" % f) |
|
1670 | 1670 | progress.increment() |
|
1671 | 1671 | fl = repo.file(f) |
|
1672 | 1672 | o = len(fl) |
|
1673 | 1673 | try: |
|
1674 | 1674 | deltas = source.deltaiter() |
|
1675 | 1675 | if not fl.addgroup(deltas, revmap, trp): |
|
1676 | 1676 | raise error.Abort(_(b"received file revlog group is empty")) |
|
1677 | 1677 | except error.CensoredBaseError as e: |
|
1678 | 1678 | raise error.Abort(_(b"received delta base is censored: %s") % e) |
|
1679 | 1679 | revisions += len(fl) - o |
|
1680 | 1680 | if f in needfiles: |
|
1681 | 1681 | needs = needfiles[f] |
|
1682 | 1682 | for new in pycompat.xrange(o, len(fl)): |
|
1683 | 1683 | n = fl.node(new) |
|
1684 | 1684 | if n in needs: |
|
1685 | 1685 | needs.remove(n) |
|
1686 | 1686 | else: |
|
1687 | 1687 | raise error.Abort(_(b"received spurious file revlog entry")) |
|
1688 | 1688 | if not needs: |
|
1689 | 1689 | del needfiles[f] |
|
1690 | 1690 | progress.complete() |
|
1691 | 1691 | |
|
1692 | 1692 | for f, needs in pycompat.iteritems(needfiles): |
|
1693 | 1693 | fl = repo.file(f) |
|
1694 | 1694 | for n in needs: |
|
1695 | 1695 | try: |
|
1696 | 1696 | fl.rev(n) |
|
1697 | 1697 | except error.LookupError: |
|
1698 | 1698 | raise error.Abort( |
|
1699 | 1699 | _(b'missing file data for %s:%s - run hg verify') |
|
1700 | 1700 | % (f, hex(n)) |
|
1701 | 1701 | ) |
|
1702 | 1702 | |
|
1703 | 1703 | return revisions, files |
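
The loop in `_addchangegroupfiles` above relies on the two-argument form of `iter`, which keeps calling `source.filelogheader` until it returns the sentinel `{}`. The same idiom in isolation:

    import io

    buf = io.BytesIO(b'abcdef')
    # call buf.read(2) repeatedly until it returns the sentinel b''
    chunks = list(iter(lambda: buf.read(2), b''))
    assert chunks == [b'ab', b'cd', b'ef']
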
@@ -1,475 +1,475 b'' | |||
|
1 | 1 | # commit.py - function to perform commit
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | |
|
6 | 6 | from __future__ import absolute_import |
|
7 | 7 | |
|
8 | 8 | import errno |
|
9 | 9 | |
|
10 | 10 | from .i18n import _ |
|
11 | 11 | from .node import ( |
|
12 | 12 | hex, |
|
13 | 13 | nullid, |
|
14 | 14 | nullrev, |
|
15 | 15 | ) |
|
16 | 16 | |
|
17 | 17 | from . import ( |
|
18 | 18 | context, |
|
19 | 19 | mergestate, |
|
20 | 20 | metadata, |
|
21 | 21 | phases, |
|
22 | 22 | scmutil, |
|
23 | 23 | subrepoutil, |
|
24 | 24 | ) |
|
25 | 25 | |
|
26 | 26 | |
|
27 | 27 | def _write_copy_meta(repo): |
|
28 | 28 | """return a (changelog, filelog) boolean tuple |
|
29 | 29 | |
|
30 | 30 | changelog: copy related information should be stored in the changeset |
|
31 | 31 | filelof: copy related information should be written in the file revision |
|
32 | 32 | """ |
|
33 | 33 | if repo.filecopiesmode == b'changeset-sidedata': |
|
34 | 34 | writechangesetcopy = True |
|
35 | 35 | writefilecopymeta = True |
|
36 | 36 | else: |
|
37 | 37 | writecopiesto = repo.ui.config(b'experimental', b'copies.write-to') |
|
38 | 38 | writefilecopymeta = writecopiesto != b'changeset-only' |
|
39 | 39 | writechangesetcopy = writecopiesto in ( |
|
40 | 40 | b'changeset-only', |
|
41 | 41 | b'compatibility', |
|
42 | 42 | ) |
|
43 | 43 | return writechangesetcopy, writefilecopymeta |
|
44 | 44 | |
|
45 | 45 | |
|
46 | 46 | def commitctx(repo, ctx, error=False, origctx=None): |
|
47 | 47 | """Add a new revision to the target repository. |
|
48 | 48 | Revision information is passed via the context argument. |
|
49 | 49 | |
|
50 | 50 | ctx.files() should list all files involved in this commit, i.e. |
|
51 | 51 | modified/added/removed files. On merge, it may be wider than the |
|
52 | 52 | ctx.files() to be committed, since any file nodes derived directly |
|
53 | 53 | from p1 or p2 are excluded from the committed ctx.files(). |
|
54 | 54 | |
|
55 | 55 | origctx is for convert to work around the problem that bug |
|
56 | 56 | fixes to the files list in changesets change hashes. For |
|
57 | 57 | convert to be the identity, it can pass an origctx and this |
|
58 | 58 | function will use the same files list when it makes sense to |
|
59 | 59 | do so. |
|
60 | 60 | """ |
|
61 | 61 | repo = repo.unfiltered() |
|
62 | 62 | |
|
63 | 63 | p1, p2 = ctx.p1(), ctx.p2() |
|
64 | 64 | user = ctx.user() |
|
65 | 65 | |
|
66 | 66 | with repo.lock(), repo.transaction(b"commit") as tr: |
|
67 | 67 | mn, files = _prepare_files(tr, ctx, error=error, origctx=origctx) |
|
68 | 68 | |
|
69 | 69 | extra = ctx.extra().copy() |
|
70 | 70 | |
|
71 | 71 | if extra is not None: |
|
72 | 72 | for name in ( |
|
73 | 73 | b'p1copies', |
|
74 | 74 | b'p2copies', |
|
75 | 75 | b'filesadded', |
|
76 | 76 | b'filesremoved', |
|
77 | 77 | ): |
|
78 | 78 | extra.pop(name, None) |
|
79 | 79 | if repo.changelog._copiesstorage == b'extra': |
|
80 | 80 | extra = _extra_with_copies(repo, extra, files) |
|
81 | 81 | |
|
82 | 82 | # update changelog |
|
83 | 83 | repo.ui.note(_(b"committing changelog\n")) |
|
84 | 84 | repo.changelog.delayupdate(tr) |
|
85 | 85 | n = repo.changelog.add( |
|
86 | 86 | mn, |
|
87 | 87 | files, |
|
88 | 88 | ctx.description(), |
|
89 | 89 | tr, |
|
90 | 90 | p1.node(), |
|
91 | 91 | p2.node(), |
|
92 | 92 | user, |
|
93 | 93 | ctx.date(), |
|
94 | 94 | extra, |
|
95 | 95 | ) |
|
96 | 96 | xp1, xp2 = p1.hex(), p2 and p2.hex() or b'' |
|
97 | 97 | repo.hook( |
|
98 | 98 | b'pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2, |
|
99 | 99 | ) |
|
100 | 100 | # set the new commit in its proper phase
|
101 | 101 | targetphase = subrepoutil.newcommitphase(repo.ui, ctx) |
|
102 | 102 | if targetphase: |
|
103 | 103 | # retracting the boundary does not alter parent changesets.
|
104 | 104 | # if a parent has a higher phase, the resulting phase will
|
105 | 105 | # be compliant anyway |
|
106 | 106 | # |
|
107 | 107 | # if the minimal phase was 0 we don't need to retract anything
|
108 | phases.registernew(repo, tr, targetphase, [n]) | |
|
108 | phases.registernew(repo, tr, targetphase, [repo[n].rev()]) | |
|
109 | 109 | return n |
|
110 | 110 | |
|
111 | 111 | |
|
112 | 112 | def _prepare_files(tr, ctx, error=False, origctx=None): |
|
113 | 113 | repo = ctx.repo() |
|
114 | 114 | p1 = ctx.p1() |
|
115 | 115 | |
|
116 | 116 | writechangesetcopy, writefilecopymeta = _write_copy_meta(repo) |
|
117 | 117 | files = metadata.ChangingFiles() |
|
118 | 118 | ms = mergestate.mergestate.read(repo) |
|
119 | 119 | salvaged = _get_salvaged(repo, ms, ctx) |
|
120 | 120 | for s in salvaged: |
|
121 | 121 | files.mark_salvaged(s) |
|
122 | 122 | |
|
123 | 123 | if ctx.manifestnode(): |
|
124 | 124 | # reuse an existing manifest revision |
|
125 | 125 | repo.ui.debug(b'reusing known manifest\n') |
|
126 | 126 | mn = ctx.manifestnode() |
|
127 | 127 | files.update_touched(ctx.files()) |
|
128 | 128 | if writechangesetcopy: |
|
129 | 129 | files.update_added(ctx.filesadded()) |
|
130 | 130 | files.update_removed(ctx.filesremoved()) |
|
131 | 131 | elif not ctx.files(): |
|
132 | 132 | repo.ui.debug(b'reusing manifest from p1 (no file change)\n') |
|
133 | 133 | mn = p1.manifestnode() |
|
134 | 134 | else: |
|
135 | 135 | mn = _process_files(tr, ctx, ms, files, error=error) |
|
136 | 136 | |
|
137 | 137 | if origctx and origctx.manifestnode() == mn: |
|
138 | 138 | origfiles = origctx.files() |
|
139 | 139 | assert files.touched.issubset(origfiles) |
|
140 | 140 | files.update_touched(origfiles) |
|
141 | 141 | |
|
142 | 142 | if writechangesetcopy: |
|
143 | 143 | files.update_copies_from_p1(ctx.p1copies()) |
|
144 | 144 | files.update_copies_from_p2(ctx.p2copies()) |
|
145 | 145 | |
|
146 | 146 | return mn, files |
|
147 | 147 | |
|
148 | 148 | |
|
149 | 149 | def _get_salvaged(repo, ms, ctx): |
|
150 | 150 | """ returns a list of salvaged files |
|
151 | 151 | |
|
152 | 152 | returns an empty list if the config option which processes salvaged files is
|
153 | 153 | not enabled """ |
|
154 | 154 | salvaged = [] |
|
155 | 155 | copy_sd = repo.filecopiesmode == b'changeset-sidedata' |
|
156 | 156 | if copy_sd and len(ctx.parents()) > 1: |
|
157 | 157 | if ms.active(): |
|
158 | 158 | for fname in sorted(ms.allextras().keys()): |
|
159 | 159 | might_removed = ms.extras(fname).get(b'merge-removal-candidate') |
|
160 | 160 | if might_removed == b'yes': |
|
161 | 161 | if fname in ctx: |
|
162 | 162 | salvaged.append(fname) |
|
163 | 163 | return salvaged |
|
164 | 164 | |
|
165 | 165 | |
|
166 | 166 | def _process_files(tr, ctx, ms, files, error=False): |
|
167 | 167 | repo = ctx.repo() |
|
168 | 168 | p1 = ctx.p1() |
|
169 | 169 | p2 = ctx.p2() |
|
170 | 170 | |
|
171 | 171 | writechangesetcopy, writefilecopymeta = _write_copy_meta(repo) |
|
172 | 172 | |
|
173 | 173 | m1ctx = p1.manifestctx() |
|
174 | 174 | m2ctx = p2.manifestctx() |
|
175 | 175 | mctx = m1ctx.copy() |
|
176 | 176 | |
|
177 | 177 | m = mctx.read() |
|
178 | 178 | m1 = m1ctx.read() |
|
179 | 179 | m2 = m2ctx.read() |
|
180 | 180 | |
|
181 | 181 | # check in files |
|
182 | 182 | added = [] |
|
183 | 183 | removed = list(ctx.removed()) |
|
184 | 184 | linkrev = len(repo) |
|
185 | 185 | repo.ui.note(_(b"committing files:\n")) |
|
186 | 186 | uipathfn = scmutil.getuipathfn(repo) |
|
187 | 187 | for f in sorted(ctx.modified() + ctx.added()): |
|
188 | 188 | repo.ui.note(uipathfn(f) + b"\n") |
|
189 | 189 | try: |
|
190 | 190 | fctx = ctx[f] |
|
191 | 191 | if fctx is None: |
|
192 | 192 | removed.append(f) |
|
193 | 193 | else: |
|
194 | 194 | added.append(f) |
|
195 | 195 | m[f], is_touched = _filecommit( |
|
196 | 196 | repo, fctx, m1, m2, linkrev, tr, writefilecopymeta, ms |
|
197 | 197 | ) |
|
198 | 198 | if is_touched: |
|
199 | 199 | if is_touched == 'added': |
|
200 | 200 | files.mark_added(f) |
|
201 | 201 | elif is_touched == 'merged': |
|
202 | 202 | files.mark_merged(f) |
|
203 | 203 | else: |
|
204 | 204 | files.mark_touched(f) |
|
205 | 205 | m.setflag(f, fctx.flags()) |
|
206 | 206 | except OSError: |
|
207 | 207 | repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f)) |
|
208 | 208 | raise |
|
209 | 209 | except IOError as inst: |
|
210 | 210 | errcode = getattr(inst, 'errno', errno.ENOENT) |
|
211 | 211 | if error or errcode and errcode != errno.ENOENT: |
|
212 | 212 | repo.ui.warn(_(b"trouble committing %s!\n") % uipathfn(f)) |
|
213 | 213 | raise |
|
214 | 214 | |
|
215 | 215 | # update manifest |
|
216 | 216 | removed = [f for f in removed if f in m1 or f in m2] |
|
217 | 217 | drop = sorted([f for f in removed if f in m]) |
|
218 | 218 | for f in drop: |
|
219 | 219 | del m[f] |
|
220 | 220 | if p2.rev() == nullrev: |
|
221 | 221 | files.update_removed(removed) |
|
222 | 222 | else: |
|
223 | 223 | rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2)) |
|
224 | 224 | for f in removed: |
|
225 | 225 | if not rf(f): |
|
226 | 226 | files.mark_removed(f) |
|
227 | 227 | |
|
228 | 228 | mn = _commit_manifest(tr, linkrev, ctx, mctx, m, files.touched, added, drop) |
|
229 | 229 | |
|
230 | 230 | return mn |
|
231 | 231 | |
|
232 | 232 | |
|
233 | 233 | def _filecommit( |
|
234 | 234 | repo, fctx, manifest1, manifest2, linkrev, tr, includecopymeta, ms, |
|
235 | 235 | ): |
|
236 | 236 | """ |
|
237 | 237 | commit an individual file as part of a larger transaction |
|
238 | 238 | |
|
239 | 239 | input: |
|
240 | 240 | |
|
241 | 241 | fctx: a file context with the content we are trying to commit |
|
242 | 242 | manifest1: manifest of changeset first parent |
|
243 | 243 | manifest2: manifest of changeset second parent |
|
244 | 244 | linkrev: revision number of the changeset being created |
|
245 | 245 | tr: current transaction
|
246 | 246 | includecopymeta: boolean, set to False to skip storing the copy data |
|
247 | 247 | (only used by the Google specific feature of using |
|
248 | 248 | changeset extra as copy source of truth). |
|
249 | 249 | ms: mergestate object |
|
250 | 250 | |
|
251 | 251 | output: (filenode, touched) |
|
252 | 252 | |
|
253 | 253 | filenode: the filenode that should be used by this changeset |
|
254 | 254 | touched: one of: None (means untouched), 'added' or 'modified'
|
255 | 255 | """ |
|
256 | 256 | |
|
257 | 257 | fname = fctx.path() |
|
258 | 258 | fparent1 = manifest1.get(fname, nullid) |
|
259 | 259 | fparent2 = manifest2.get(fname, nullid) |
|
260 | 260 | touched = None |
|
261 | 261 | if fparent1 == fparent2 == nullid: |
|
262 | 262 | touched = 'added' |
|
263 | 263 | |
|
264 | 264 | if isinstance(fctx, context.filectx): |
|
265 | 265 | # This block fast-paths the comparisons which are usually done. It
|
266 | 266 | # assumes that bare filectx is used and no merge happened, hence no |
|
267 | 267 | # need to create a new file revision in this case. |
|
268 | 268 | node = fctx.filenode() |
|
269 | 269 | if node in [fparent1, fparent2]: |
|
270 | 270 | repo.ui.debug(b'reusing %s filelog entry\n' % fname) |
|
271 | 271 | if ( |
|
272 | 272 | fparent1 != nullid and manifest1.flags(fname) != fctx.flags() |
|
273 | 273 | ) or ( |
|
274 | 274 | fparent2 != nullid and manifest2.flags(fname) != fctx.flags() |
|
275 | 275 | ): |
|
276 | 276 | touched = 'modified' |
|
277 | 277 | return node, touched |
|
278 | 278 | |
|
279 | 279 | flog = repo.file(fname) |
|
280 | 280 | meta = {} |
|
281 | 281 | cfname = fctx.copysource() |
|
282 | 282 | fnode = None |
|
283 | 283 | |
|
284 | 284 | if cfname and cfname != fname: |
|
285 | 285 | # Mark the new revision of this file as a copy of another |
|
286 | 286 | # file. This copy data will effectively act as a parent |
|
287 | 287 | # of this new revision. If this is a merge, the first |
|
288 | 288 | # parent will be the nullid (meaning "look up the copy data") |
|
289 | 289 | # and the second one will be the other parent. For example: |
|
290 | 290 | # |
|
291 | 291 | # 0 --- 1 --- 3 rev1 changes file foo |
|
292 | 292 | # \ / rev2 renames foo to bar and changes it |
|
293 | 293 | # \- 2 -/ rev3 should have bar with all changes and |
|
294 | 294 | # should record that bar descends from |
|
295 | 295 | # bar in rev2 and foo in rev1 |
|
296 | 296 | # |
|
297 | 297 | # this allows this merge to succeed: |
|
298 | 298 | # |
|
299 | 299 | # 0 --- 1 --- 3 rev4 reverts the content change from rev2 |
|
300 | 300 | # \ / merging rev3 and rev4 should use bar@rev2 |
|
301 | 301 | # \- 2 --- 4 as the merge base |
|
302 | 302 | # |
|
303 | 303 | |
|
304 | 304 | cnode = manifest1.get(cfname) |
|
305 | 305 | newfparent = fparent2 |
|
306 | 306 | |
|
307 | 307 | if manifest2: # branch merge |
|
308 | 308 | if fparent2 == nullid or cnode is None: # copied on remote side |
|
309 | 309 | if cfname in manifest2: |
|
310 | 310 | cnode = manifest2[cfname] |
|
311 | 311 | newfparent = fparent1 |
|
312 | 312 | |
|
313 | 313 | # Here, we used to search backwards through history to try to find |
|
314 | 314 | # where the file copy came from if the source of a copy was not in |
|
315 | 315 | # the parent directory. However, this doesn't actually make sense to |
|
316 | 316 | # do (what does a copy from something not in your working copy even |
|
317 | 317 | # mean?) and it causes bugs (eg, issue4476). Instead, we will warn |
|
318 | 318 | # the user that copy information was dropped, so if they didn't |
|
319 | 319 | # expect this outcome it can be fixed, but this is the correct |
|
320 | 320 | # behavior in this circumstance. |
|
321 | 321 | |
|
322 | 322 | if cnode: |
|
323 | 323 | repo.ui.debug(b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))) |
|
324 | 324 | if includecopymeta: |
|
325 | 325 | meta[b"copy"] = cfname |
|
326 | 326 | meta[b"copyrev"] = hex(cnode) |
|
327 | 327 | fparent1, fparent2 = nullid, newfparent |
|
328 | 328 | else: |
|
329 | 329 | repo.ui.warn( |
|
330 | 330 | _( |
|
331 | 331 | b"warning: can't find ancestor for '%s' " |
|
332 | 332 | b"copied from '%s'!\n" |
|
333 | 333 | ) |
|
334 | 334 | % (fname, cfname) |
|
335 | 335 | ) |
|
336 | 336 | |
|
337 | 337 | elif fparent1 == nullid: |
|
338 | 338 | fparent1, fparent2 = fparent2, nullid |
|
339 | 339 | elif fparent2 != nullid: |
|
340 | 340 | # is one parent an ancestor of the other? |
|
341 | 341 | fparentancestors = flog.commonancestorsheads(fparent1, fparent2) |
|
342 | 342 | if fparent1 in fparentancestors: |
|
343 | 343 | fparent1, fparent2 = fparent2, nullid |
|
344 | 344 | elif fparent2 in fparentancestors: |
|
345 | 345 | fparent2 = nullid |
|
346 | 346 | elif not fparentancestors: |
|
347 | 347 | # TODO: this whole if-else might be simplified much more |
|
348 | 348 | if ( |
|
349 | 349 | ms.active() |
|
350 | 350 | and ms.extras(fname).get(b'filenode-source') == b'other' |
|
351 | 351 | ): |
|
352 | 352 | fparent1, fparent2 = fparent2, nullid |
|
353 | 353 | |
|
354 | 354 | force_new_node = False |
|
355 | 355 | # The file might have been deleted by merge code and the user explicitly |

356 | 356 | # chose to revert it and keep it. The other case is a change-delete or |

357 | 357 | # delete-change conflict where the user explicitly chose to keep the |

358 | 358 | # file. The goal is to create a new filenode for these explicit choices. |
|
359 | 359 | if ( |
|
360 | 360 | repo.ui.configbool(b'experimental', b'merge-track-salvaged') |
|
361 | 361 | and ms.active() |
|
362 | 362 | and ms.extras(fname).get(b'merge-removal-candidate') == b'yes' |
|
363 | 363 | ): |
|
364 | 364 | force_new_node = True |
|
365 | 365 | # is the file changed? |
|
366 | 366 | text = fctx.data() |
|
367 | 367 | if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node: |
|
368 | 368 | if touched is None: # do not overwrite added |
|
369 | 369 | if fparent2 == nullid: |
|
370 | 370 | touched = 'modified' |
|
371 | 371 | else: |
|
372 | 372 | touched = 'merged' |
|
373 | 373 | fnode = flog.add(text, meta, tr, linkrev, fparent1, fparent2) |
|
374 | 374 | # are just the flags changed during merge? |
|
375 | 375 | elif fname in manifest1 and manifest1.flags(fname) != fctx.flags(): |
|
376 | 376 | touched = 'modified' |
|
377 | 377 | fnode = fparent1 |
|
378 | 378 | else: |
|
379 | 379 | fnode = fparent1 |
|
380 | 380 | return fnode, touched |
|
381 | 381 | |
|
382 | 382 | |
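Note: the copy-metadata block in _filecommit above turns the copy source into a pseudo-parent: the copy name and source filenode go into the filelog metadata, the first parent becomes nullid ("look up the copy data"), and the surviving parent moves to the second slot. A minimal standalone sketch of that bookkeeping (plain Python with illustrative names, not the Mercurial API):

    from binascii import hexlify

    NULLID = b'\0' * 20

    def record_copy(meta, cfname, cnode, fparent2):
        # Store the copy source and its filenode; the first parent becomes
        # nullid, which readers interpret as "consult the copy metadata".
        meta[b'copy'] = cfname
        meta[b'copyrev'] = hexlify(cnode)
        return NULLID, fparent2

    meta = {}
    p1, p2 = record_copy(meta, b'foo', b'\x12' * 20, b'\x34' * 20)
    assert p1 == NULLID and meta[b'copyrev'] == b'12' * 20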
|
383 | 383 | def _commit_manifest(tr, linkrev, ctx, mctx, manifest, files, added, drop): |
|
384 | 384 | """make a new manifest entry (or reuse a new one) |
|
385 | 385 | |
|
386 | 386 | given an initialised manifest context and precomputed list of |
|
387 | 387 | - files: files affected by the commit |
|
388 | 388 | - added: new entries in the manifest |
|
389 | 389 | - drop: entries present in parents but absent from this one |
|
390 | 390 | |
|
391 | 391 | Create a new manifest revision, reuse existing ones if possible. |
|
392 | 392 | |
|
393 | 393 | Return the nodeid of the manifest revision. |
|
394 | 394 | """ |
|
395 | 395 | repo = ctx.repo() |
|
396 | 396 | |
|
397 | 397 | md = None |
|
398 | 398 | |
|
399 | 399 | # all this is cached, so it is fine to get them all from the ctx. |
|
400 | 400 | p1 = ctx.p1() |
|
401 | 401 | p2 = ctx.p2() |
|
402 | 402 | m1ctx = p1.manifestctx() |
|
403 | 403 | |
|
404 | 404 | m1 = m1ctx.read() |
|
405 | 405 | |
|
406 | 406 | if not files: |
|
407 | 407 | # if no "files" actually changed in terms of the changelog, |
|
408 | 408 | # try hard to detect an unmodified manifest entry so that the |
|
409 | 409 | # exact same commit can be reproduced later on convert. |
|
410 | 410 | md = m1.diff(manifest, scmutil.matchfiles(repo, ctx.files())) |
|
411 | 411 | if not files and md: |
|
412 | 412 | repo.ui.debug( |
|
413 | 413 | b'not reusing manifest (no file change in ' |
|
414 | 414 | b'changelog, but manifest differs)\n' |
|
415 | 415 | ) |
|
416 | 416 | if files or md: |
|
417 | 417 | repo.ui.note(_(b"committing manifest\n")) |
|
418 | 418 | # we're using narrowmatch here since it's already applied at |
|
419 | 419 | # other stages (such as dirstate.walk), so we're already |
|
420 | 420 | # ignoring things outside of narrowspec in most cases. The |
|
421 | 421 | # one case where we might have files outside the narrowspec |
|
422 | 422 | # at this point is merges, and we already error out in the |
|
423 | 423 | # case where the merge has files outside of the narrowspec, |
|
424 | 424 | # so this is safe. |
|
425 | 425 | mn = mctx.write( |
|
426 | 426 | tr, |
|
427 | 427 | linkrev, |
|
428 | 428 | p1.manifestnode(), |
|
429 | 429 | p2.manifestnode(), |
|
430 | 430 | added, |
|
431 | 431 | drop, |
|
432 | 432 | match=repo.narrowmatch(), |
|
433 | 433 | ) |
|
434 | 434 | else: |
|
435 | 435 | repo.ui.debug( |
|
436 | 436 | b'reusing manifest from p1 (listed files ' b'actually unchanged)\n' |
|
437 | 437 | ) |
|
438 | 438 | mn = p1.manifestnode() |
|
439 | 439 | |
|
440 | 440 | return mn |
|
441 | 441 | |
|
442 | 442 | |
|
443 | 443 | def _extra_with_copies(repo, extra, files): |
|
444 | 444 | """encode copy information into a `extra` dictionnary""" |
|
445 | 445 | p1copies = files.copied_from_p1 |
|
446 | 446 | p2copies = files.copied_from_p2 |
|
447 | 447 | filesadded = files.added |
|
448 | 448 | filesremoved = files.removed |
|
449 | 449 | files = sorted(files.touched) |
|
450 | 450 | if not _write_copy_meta(repo)[1]: |
|
451 | 451 | # If writing only to changeset extras, use None to indicate that |
|
452 | 452 | # no entry should be written. If writing to both, write an empty |
|
453 | 453 | # entry to prevent the reader from falling back to reading |
|
454 | 454 | # filelogs. |
|
455 | 455 | p1copies = p1copies or None |
|
456 | 456 | p2copies = p2copies or None |
|
457 | 457 | filesadded = filesadded or None |
|
458 | 458 | filesremoved = filesremoved or None |
|
459 | 459 | |
|
460 | 460 | extrasentries = p1copies, p2copies, filesadded, filesremoved |
|
461 | 461 | if extra is None and any(x is not None for x in extrasentries): |
|
462 | 462 | extra = {} |
|
463 | 463 | if p1copies is not None: |
|
464 | 464 | p1copies = metadata.encodecopies(files, p1copies) |
|
465 | 465 | extra[b'p1copies'] = p1copies |
|
466 | 466 | if p2copies is not None: |
|
467 | 467 | p2copies = metadata.encodecopies(files, p2copies) |
|
468 | 468 | extra[b'p2copies'] = p2copies |
|
469 | 469 | if filesadded is not None: |
|
470 | 470 | filesadded = metadata.encodefileindices(files, filesadded) |
|
471 | 471 | extra[b'filesadded'] = filesadded |
|
472 | 472 | if filesremoved is not None: |
|
473 | 473 | filesremoved = metadata.encodefileindices(files, filesremoved) |
|
474 | 474 | extra[b'filesremoved'] = filesremoved |
|
475 | 475 | return extra |
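Note: encodecopies/encodefileindices above shrink the changeset extra by referring to positions in the sorted `files` list instead of repeating full paths. A hypothetical mini-encoder showing the idea (the real encoding lives in mercurial.metadata; the format details here are assumptions):

    def encode_file_indices(files, subset):
        # Replace each path in "subset" by its index in the sorted file list.
        index = {f: i for i, f in enumerate(files)}
        return b'\n'.join(b'%d' % index[f] for f in sorted(subset))

    files = [b'a.txt', b'b.txt', b'c.txt']  # sorted, as in the caller above
    assert encode_file_indices(files, {b'c.txt', b'a.txt'}) == b'0\n2'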
@@ -1,784 +1,786 b'' | |||
|
1 | 1 | # exchangev2.py - repository exchange for wire protocol version 2 |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import collections |
|
11 | 11 | import weakref |
|
12 | 12 | |
|
13 | 13 | from .i18n import _ |
|
14 | 14 | from .node import ( |
|
15 | 15 | nullid, |
|
16 | 16 | short, |
|
17 | 17 | ) |
|
18 | 18 | from . import ( |
|
19 | 19 | bookmarks, |
|
20 | 20 | error, |
|
21 | 21 | mdiff, |
|
22 | 22 | narrowspec, |
|
23 | 23 | phases, |
|
24 | 24 | pycompat, |
|
25 | 25 | setdiscovery, |
|
26 | 26 | ) |
|
27 | 27 | from .interfaces import repository |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | def pull(pullop): |
|
31 | 31 | """Pull using wire protocol version 2.""" |
|
32 | 32 | repo = pullop.repo |
|
33 | 33 | remote = pullop.remote |
|
34 | 34 | |
|
35 | 35 | usingrawchangelogandmanifest = _checkuserawstorefiledata(pullop) |
|
36 | 36 | |
|
37 | 37 | # If this is a clone and it was requested to perform a "stream clone", |
|
38 | 38 | # we obtain the raw files data from the remote then fall back to an |
|
39 | 39 | # incremental pull. This is somewhat hacky and is not nearly robust enough |
|
40 | 40 | # for long-term usage. |
|
41 | 41 | if usingrawchangelogandmanifest: |
|
42 | 42 | with repo.transaction(b'clone'): |
|
43 | 43 | _fetchrawstorefiles(repo, remote) |
|
44 | 44 | repo.invalidate(clearfilecache=True) |
|
45 | 45 | |
|
46 | 46 | tr = pullop.trmanager.transaction() |
|
47 | 47 | |
|
48 | 48 | # We don't use the repo's narrow matcher here because the patterns passed |
|
49 | 49 | # to exchange.pull() could be different. |
|
50 | 50 | narrowmatcher = narrowspec.match( |
|
51 | 51 | repo.root, |
|
52 | 52 | # Empty maps to nevermatcher. So always |
|
53 | 53 | # set includes if missing. |
|
54 | 54 | pullop.includepats or {b'path:.'}, |
|
55 | 55 | pullop.excludepats, |
|
56 | 56 | ) |
|
57 | 57 | |
|
58 | 58 | if pullop.includepats or pullop.excludepats: |
|
59 | 59 | pathfilter = {} |
|
60 | 60 | if pullop.includepats: |
|
61 | 61 | pathfilter[b'include'] = sorted(pullop.includepats) |
|
62 | 62 | if pullop.excludepats: |
|
63 | 63 | pathfilter[b'exclude'] = sorted(pullop.excludepats) |
|
64 | 64 | else: |
|
65 | 65 | pathfilter = None |
|
66 | 66 | |
|
67 | 67 | # Figure out what needs to be fetched. |
|
68 | 68 | common, fetch, remoteheads = _pullchangesetdiscovery( |
|
69 | 69 | repo, remote, pullop.heads, abortwhenunrelated=pullop.force |
|
70 | 70 | ) |
|
71 | 71 | |
|
72 | 72 | # And fetch the data. |
|
73 | 73 | pullheads = pullop.heads or remoteheads |
|
74 | 74 | csetres = _fetchchangesets(repo, tr, remote, common, fetch, pullheads) |
|
75 | 75 | |
|
76 | 76 | # New revisions are written to the changelog. But all other updates |
|
77 | 77 | # are deferred. Do those now. |
|
78 | 78 | |
|
79 | 79 | # Ensure all new changesets are draft by default. If the repo is |
|
80 | 80 | # publishing, the phase will be adjusted by the loop below. |
|
81 | 81 | if csetres[b'added']: |
|
82 | phases.registernew(repo, tr, phases.draft, csetres[b'added']) | |
|
82 | phases.registernew( | |
|
83 | repo, tr, phases.draft, [repo[n].rev() for n in csetres[b'added']] | |
|
84 | ) | |
|
83 | 85 | |
|
84 | 86 | # And adjust the phase of all changesets accordingly. |
|
85 | 87 | for phasenumber, phase in phases.phasenames.items(): |
|
86 | 88 | if phase == b'secret' or not csetres[b'nodesbyphase'][phase]: |
|
87 | 89 | continue |
|
88 | 90 | |
|
89 | 91 | phases.advanceboundary( |
|
90 | 92 | repo, tr, phasenumber, csetres[b'nodesbyphase'][phase], |
|
91 | 93 | ) |
|
92 | 94 | |
|
93 | 95 | # Write bookmark updates. |
|
94 | 96 | bookmarks.updatefromremote( |
|
95 | 97 | repo.ui, |
|
96 | 98 | repo, |
|
97 | 99 | csetres[b'bookmarks'], |
|
98 | 100 | remote.url(), |
|
99 | 101 | pullop.gettransaction, |
|
100 | 102 | explicit=pullop.explicitbookmarks, |
|
101 | 103 | ) |
|
102 | 104 | |
|
103 | 105 | manres = _fetchmanifests(repo, tr, remote, csetres[b'manifestnodes']) |
|
104 | 106 | |
|
105 | 107 | # We don't properly support shallow changesets and manifests yet. So we apply |
|
106 | 108 | # depth limiting locally. |
|
107 | 109 | if pullop.depth: |
|
108 | 110 | relevantcsetnodes = set() |
|
109 | 111 | clnode = repo.changelog.node |
|
110 | 112 | |
|
111 | 113 | for rev in repo.revs( |
|
112 | 114 | b'ancestors(%ln, %s)', pullheads, pullop.depth - 1 |
|
113 | 115 | ): |
|
114 | 116 | relevantcsetnodes.add(clnode(rev)) |
|
115 | 117 | |
|
116 | 118 | csetrelevantfilter = lambda n: n in relevantcsetnodes |
|
117 | 119 | |
|
118 | 120 | else: |
|
119 | 121 | csetrelevantfilter = lambda n: True |
|
120 | 122 | |
|
121 | 123 | # If obtaining the raw store files, we need to scan the full repo to |
|
122 | 124 | # derive all the changesets, manifests, and linkrevs. |
|
123 | 125 | if usingrawchangelogandmanifest: |
|
124 | 126 | csetsforfiles = [] |
|
125 | 127 | mnodesforfiles = [] |
|
126 | 128 | manifestlinkrevs = {} |
|
127 | 129 | |
|
128 | 130 | for rev in repo: |
|
129 | 131 | ctx = repo[rev] |
|
130 | 132 | node = ctx.node() |
|
131 | 133 | |
|
132 | 134 | if not csetrelevantfilter(node): |
|
133 | 135 | continue |
|
134 | 136 | |
|
135 | 137 | mnode = ctx.manifestnode() |
|
136 | 138 | |
|
137 | 139 | csetsforfiles.append(node) |
|
138 | 140 | mnodesforfiles.append(mnode) |
|
139 | 141 | manifestlinkrevs[mnode] = rev |
|
140 | 142 | |
|
141 | 143 | else: |
|
142 | 144 | csetsforfiles = [n for n in csetres[b'added'] if csetrelevantfilter(n)] |
|
143 | 145 | mnodesforfiles = manres[b'added'] |
|
144 | 146 | manifestlinkrevs = manres[b'linkrevs'] |
|
145 | 147 | |
|
146 | 148 | # Find all file nodes referenced by added manifests and fetch those |
|
147 | 149 | # revisions. |
|
148 | 150 | fnodes = _derivefilesfrommanifests(repo, narrowmatcher, mnodesforfiles) |
|
149 | 151 | _fetchfilesfromcsets( |
|
150 | 152 | repo, |
|
151 | 153 | tr, |
|
152 | 154 | remote, |
|
153 | 155 | pathfilter, |
|
154 | 156 | fnodes, |
|
155 | 157 | csetsforfiles, |
|
156 | 158 | manifestlinkrevs, |
|
157 | 159 | shallow=bool(pullop.depth), |
|
158 | 160 | ) |
|
159 | 161 | |
|
160 | 162 | |
|
161 | 163 | def _checkuserawstorefiledata(pullop): |
|
162 | 164 | """Check whether we should use rawstorefiledata command to retrieve data.""" |
|
163 | 165 | |
|
164 | 166 | repo = pullop.repo |
|
165 | 167 | remote = pullop.remote |
|
166 | 168 | |
|
167 | 169 | # Command to obtain raw store data isn't available. |
|
168 | 170 | if b'rawstorefiledata' not in remote.apidescriptor[b'commands']: |
|
169 | 171 | return False |
|
170 | 172 | |
|
171 | 173 | # Only honor if user requested stream clone operation. |
|
172 | 174 | if not pullop.streamclonerequested: |
|
173 | 175 | return False |
|
174 | 176 | |
|
175 | 177 | # Only works on empty repos. |
|
176 | 178 | if len(repo): |
|
177 | 179 | return False |
|
178 | 180 | |
|
179 | 181 | # TODO This is super hacky. There needs to be a storage API for this. We |
|
180 | 182 | # also need to check for compatibility with the remote. |
|
181 | 183 | if b'revlogv1' not in repo.requirements: |
|
182 | 184 | return False |
|
183 | 185 | |
|
184 | 186 | return True |
|
185 | 187 | |
|
186 | 188 | |
|
187 | 189 | def _fetchrawstorefiles(repo, remote): |
|
188 | 190 | with remote.commandexecutor() as e: |
|
189 | 191 | objs = e.callcommand( |
|
190 | 192 | b'rawstorefiledata', {b'files': [b'changelog', b'manifestlog'],} |
|
191 | 193 | ).result() |
|
192 | 194 | |
|
193 | 195 | # First object is a summary of files data that follows. |
|
194 | 196 | overall = next(objs) |
|
195 | 197 | |
|
196 | 198 | progress = repo.ui.makeprogress( |
|
197 | 199 | _(b'clone'), total=overall[b'totalsize'], unit=_(b'bytes') |
|
198 | 200 | ) |
|
199 | 201 | with progress: |
|
200 | 202 | progress.update(0) |
|
201 | 203 | |
|
202 | 204 | # Next are pairs of file metadata, data. |
|
203 | 205 | while True: |
|
204 | 206 | try: |
|
205 | 207 | filemeta = next(objs) |
|
206 | 208 | except StopIteration: |
|
207 | 209 | break |
|
208 | 210 | |
|
209 | 211 | for k in (b'location', b'path', b'size'): |
|
210 | 212 | if k not in filemeta: |
|
211 | 213 | raise error.Abort( |
|
212 | 214 | _(b'remote file data missing key: %s') % k |
|
213 | 215 | ) |
|
214 | 216 | |
|
215 | 217 | if filemeta[b'location'] == b'store': |
|
216 | 218 | vfs = repo.svfs |
|
217 | 219 | else: |
|
218 | 220 | raise error.Abort( |
|
219 | 221 | _(b'invalid location for raw file data: %s') |
|
220 | 222 | % filemeta[b'location'] |
|
221 | 223 | ) |
|
222 | 224 | |
|
223 | 225 | bytesremaining = filemeta[b'size'] |
|
224 | 226 | |
|
225 | 227 | with vfs.open(filemeta[b'path'], b'wb') as fh: |
|
226 | 228 | while True: |
|
227 | 229 | try: |
|
228 | 230 | chunk = next(objs) |
|
229 | 231 | except StopIteration: |
|
230 | 232 | break |
|
231 | 233 | |
|
232 | 234 | bytesremaining -= len(chunk) |
|
233 | 235 | |
|
234 | 236 | if bytesremaining < 0: |
|
235 | 237 | raise error.Abort( |
|
236 | 238 | _( |
|
237 | 239 | b'received invalid number of bytes for file ' |
|
238 | 240 | b'data; expected %d, got extra' |
|
239 | 241 | ) |
|
240 | 242 | % filemeta[b'size'] |
|
241 | 243 | ) |
|
242 | 244 | |
|
243 | 245 | progress.increment(step=len(chunk)) |
|
244 | 246 | fh.write(chunk) |
|
245 | 247 | |
|
246 | 248 | try: |
|
247 | 249 | if chunk.islast: |
|
248 | 250 | break |
|
249 | 251 | except AttributeError: |
|
250 | 252 | raise error.Abort( |
|
251 | 253 | _( |
|
252 | 254 | b'did not receive indefinite length bytestring ' |
|
253 | 255 | b'for file data' |
|
254 | 256 | ) |
|
255 | 257 | ) |
|
256 | 258 | |
|
257 | 259 | if bytesremaining: |
|
258 | 260 | raise error.Abort( |
|
259 | 261 | _( |
|
260 | 262 | b'received invalid number of bytes for ' |
|
261 | 263 | b'file data; expected %d got %d' |
|
262 | 264 | ) |
|
263 | 265 | % ( |
|
264 | 266 | filemeta[b'size'], |
|
265 | 267 | filemeta[b'size'] - bytesremaining, |
|
266 | 268 | ) |
|
267 | 269 | ) |
|
268 | 270 | |
|
269 | 271 | |
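Note: the streaming loop in _fetchrawstorefiles keeps a simple per-file byte budget: each chunk decrements the declared size, an overrun aborts immediately, and a nonzero remainder after the last chunk means a short read. The same accounting pattern in isolation (illustrative only):

    def consume_chunks(chunks, declared_size):
        remaining = declared_size
        for chunk in chunks:
            remaining -= len(chunk)
            if remaining < 0:
                raise ValueError('received more bytes than declared')
        if remaining:
            raise ValueError('received fewer bytes than declared')

    consume_chunks([b'ab', b'cd'], 4)  # exact size: passes silently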
|
270 | 272 | def _pullchangesetdiscovery(repo, remote, heads, abortwhenunrelated=True): |
|
271 | 273 | """Determine which changesets need to be pulled.""" |
|
272 | 274 | |
|
273 | 275 | if heads: |
|
274 | 276 | knownnode = repo.changelog.hasnode |
|
275 | 277 | if all(knownnode(head) for head in heads): |
|
276 | 278 | return heads, False, heads |
|
277 | 279 | |
|
278 | 280 | # TODO wire protocol version 2 is capable of more efficient discovery |
|
279 | 281 | # than setdiscovery. Consider implementing something better. |
|
280 | 282 | common, fetch, remoteheads = setdiscovery.findcommonheads( |
|
281 | 283 | repo.ui, repo, remote, abortwhenunrelated=abortwhenunrelated |
|
282 | 284 | ) |
|
283 | 285 | |
|
284 | 286 | common = set(common) |
|
285 | 287 | remoteheads = set(remoteheads) |
|
286 | 288 | |
|
287 | 289 | # If a remote head is filtered locally, put it back in the common set. |
|
288 | 290 | # See the comment in exchange._pulldiscoverychangegroup() for more. |
|
289 | 291 | |
|
290 | 292 | if fetch and remoteheads: |
|
291 | 293 | has_node = repo.unfiltered().changelog.index.has_node |
|
292 | 294 | |
|
293 | 295 | common |= {head for head in remoteheads if has_node(head)} |
|
294 | 296 | |
|
295 | 297 | if set(remoteheads).issubset(common): |
|
296 | 298 | fetch = [] |
|
297 | 299 | |
|
298 | 300 | common.discard(nullid) |
|
299 | 301 | |
|
300 | 302 | return common, fetch, remoteheads |
|
301 | 303 | |
|
302 | 304 | |
|
303 | 305 | def _fetchchangesets(repo, tr, remote, common, fetch, remoteheads): |
|
304 | 306 | # TODO consider adding a step here where we obtain the DAG shape first |
|
305 | 307 | # (or ask the server to slice changesets into chunks for us) so that |
|
306 | 308 | # we can perform multiple fetches in batches. This will facilitate |
|
307 | 309 | # resuming interrupted clones, higher server-side cache hit rates due |
|
308 | 310 | # to smaller segments, etc. |
|
309 | 311 | with remote.commandexecutor() as e: |
|
310 | 312 | objs = e.callcommand( |
|
311 | 313 | b'changesetdata', |
|
312 | 314 | { |
|
313 | 315 | b'revisions': [ |
|
314 | 316 | { |
|
315 | 317 | b'type': b'changesetdagrange', |
|
316 | 318 | b'roots': sorted(common), |
|
317 | 319 | b'heads': sorted(remoteheads), |
|
318 | 320 | } |
|
319 | 321 | ], |
|
320 | 322 | b'fields': {b'bookmarks', b'parents', b'phase', b'revision'}, |
|
321 | 323 | }, |
|
322 | 324 | ).result() |
|
323 | 325 | |
|
324 | 326 | # The context manager waits on all response data when exiting. So |
|
325 | 327 | # we need to remain in the context manager in order to stream data. |
|
326 | 328 | return _processchangesetdata(repo, tr, objs) |
|
327 | 329 | |
|
328 | 330 | |
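Note: as the comment above says, the command executor waits on all response data when its context manager exits, so the response generator has to be drained inside the `with` block. A toy illustration of that constraint (not the real executor API):

    import contextlib

    @contextlib.contextmanager
    def command_executor():
        objs = iter([b'header', b'payload'])  # pretend streamed responses
        yield objs
        # The real executor blocks here until all response data arrived;
        # anything not consumed by now would have to be buffered.

    with command_executor() as objs:
        received = list(objs)  # drain while the executor is still open

    assert received == [b'header', b'payload']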
|
329 | 331 | def _processchangesetdata(repo, tr, objs): |
|
330 | 332 | repo.hook(b'prechangegroup', throw=True, **pycompat.strkwargs(tr.hookargs)) |
|
331 | 333 | |
|
332 | 334 | urepo = repo.unfiltered() |
|
333 | 335 | cl = urepo.changelog |
|
334 | 336 | |
|
335 | 337 | cl.delayupdate(tr) |
|
336 | 338 | |
|
337 | 339 | # The first emitted object is a header describing the data that |
|
338 | 340 | # follows. |
|
339 | 341 | meta = next(objs) |
|
340 | 342 | |
|
341 | 343 | progress = repo.ui.makeprogress( |
|
342 | 344 | _(b'changesets'), unit=_(b'chunks'), total=meta.get(b'totalitems') |
|
343 | 345 | ) |
|
344 | 346 | |
|
345 | 347 | manifestnodes = {} |
|
346 | 348 | added = [] |
|
347 | 349 | |
|
348 | 350 | def linkrev(node): |
|
349 | 351 | repo.ui.debug(b'add changeset %s\n' % short(node)) |
|
350 | 352 | # Linkrev for changelog is always self. |
|
351 | 353 | return len(cl) |
|
352 | 354 | |
|
353 | 355 | def ondupchangeset(cl, node): |
|
354 | 356 | added.append(node) |
|
355 | 357 | |
|
356 | 358 | def onchangeset(cl, node): |
|
357 | 359 | progress.increment() |
|
358 | 360 | |
|
359 | 361 | revision = cl.changelogrevision(node) |
|
360 | 362 | added.append(node) |
|
361 | 363 | |
|
362 | 364 | # We need to preserve the mapping of changelog revision to node |
|
363 | 365 | # so we can set the linkrev accordingly when manifests are added. |
|
364 | 366 | manifestnodes[cl.rev(node)] = revision.manifest |
|
365 | 367 | |
|
366 | 368 | nodesbyphase = {phase: set() for phase in phases.phasenames.values()} |
|
367 | 369 | remotebookmarks = {} |
|
368 | 370 | |
|
369 | 371 | # addgroup() expects a 7-tuple describing revisions. This normalizes |
|
370 | 372 | # the wire data to that format. |
|
371 | 373 | # |
|
372 | 374 | # This loop also aggregates non-revision metadata, such as phase |
|
373 | 375 | # data. |
|
374 | 376 | def iterrevisions(): |
|
375 | 377 | for cset in objs: |
|
376 | 378 | node = cset[b'node'] |
|
377 | 379 | |
|
378 | 380 | if b'phase' in cset: |
|
379 | 381 | nodesbyphase[cset[b'phase']].add(node) |
|
380 | 382 | |
|
381 | 383 | for mark in cset.get(b'bookmarks', []): |
|
382 | 384 | remotebookmarks[mark] = node |
|
383 | 385 | |
|
384 | 386 | # TODO add mechanism for extensions to examine records so they |
|
385 | 387 | # can siphon off custom data fields. |
|
386 | 388 | |
|
387 | 389 | extrafields = {} |
|
388 | 390 | |
|
389 | 391 | for field, size in cset.get(b'fieldsfollowing', []): |
|
390 | 392 | extrafields[field] = next(objs) |
|
391 | 393 | |
|
392 | 394 | # Some entries might be metadata-only updates. |
|
393 | 395 | if b'revision' not in extrafields: |
|
394 | 396 | continue |
|
395 | 397 | |
|
396 | 398 | data = extrafields[b'revision'] |
|
397 | 399 | |
|
398 | 400 | yield ( |
|
399 | 401 | node, |
|
400 | 402 | cset[b'parents'][0], |
|
401 | 403 | cset[b'parents'][1], |
|
402 | 404 | # Linknode is always itself for changesets. |
|
403 | 405 | cset[b'node'], |
|
404 | 406 | # We always send full revisions. So delta base is not set. |
|
405 | 407 | nullid, |
|
406 | 408 | mdiff.trivialdiffheader(len(data)) + data, |
|
407 | 409 | # Flags not yet supported. |
|
408 | 410 | 0, |
|
409 | 411 | ) |
|
410 | 412 | |
|
411 | 413 | cl.addgroup( |
|
412 | 414 | iterrevisions(), |
|
413 | 415 | linkrev, |
|
414 | 416 | weakref.proxy(tr), |
|
415 | 417 | addrevisioncb=onchangeset, |
|
416 | 418 | duplicaterevisioncb=ondupchangeset, |
|
417 | 419 | ) |
|
418 | 420 | |
|
419 | 421 | progress.complete() |
|
420 | 422 | |
|
421 | 423 | return { |
|
422 | 424 | b'added': added, |
|
423 | 425 | b'nodesbyphase': nodesbyphase, |
|
424 | 426 | b'bookmarks': remotebookmarks, |
|
425 | 427 | b'manifestnodes': manifestnodes, |
|
426 | 428 | } |
|
427 | 429 | |
|
428 | 430 | |
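Note: iterrevisions() above wraps each full revision as a delta against an empty base via mdiff.trivialdiffheader(). Assuming the bdiff hunk-header layout (three big-endian 32-bit ints: start, end, length of the new text), an equivalent standalone helper:

    import struct

    def trivial_full_delta(data):
        # "Replace bytes 0..0 of the base with the whole revision text."
        return struct.pack(b'>lll', 0, 0, len(data)) + data

    delta = trivial_full_delta(b'abc')
    assert struct.unpack(b'>lll', delta[:12]) == (0, 0, 3)
    assert delta[12:] == b'abc'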
|
429 | 431 | def _fetchmanifests(repo, tr, remote, manifestnodes): |
|
430 | 432 | rootmanifest = repo.manifestlog.getstorage(b'') |
|
431 | 433 | |
|
432 | 434 | # Some manifests can be shared between changesets. Filter out revisions |
|
433 | 435 | # we already know about. |
|
434 | 436 | fetchnodes = [] |
|
435 | 437 | linkrevs = {} |
|
436 | 438 | seen = set() |
|
437 | 439 | |
|
438 | 440 | for clrev, node in sorted(pycompat.iteritems(manifestnodes)): |
|
439 | 441 | if node in seen: |
|
440 | 442 | continue |
|
441 | 443 | |
|
442 | 444 | try: |
|
443 | 445 | rootmanifest.rev(node) |
|
444 | 446 | except error.LookupError: |
|
445 | 447 | fetchnodes.append(node) |
|
446 | 448 | linkrevs[node] = clrev |
|
447 | 449 | |
|
448 | 450 | seen.add(node) |
|
449 | 451 | |
|
450 | 452 | # TODO handle tree manifests |
|
451 | 453 | |
|
452 | 454 | # addgroup() expects a 7-tuple describing revisions. This normalizes |
|
453 | 455 | # the wire data to that format. |
|
454 | 456 | def iterrevisions(objs, progress): |
|
455 | 457 | for manifest in objs: |
|
456 | 458 | node = manifest[b'node'] |
|
457 | 459 | |
|
458 | 460 | extrafields = {} |
|
459 | 461 | |
|
460 | 462 | for field, size in manifest.get(b'fieldsfollowing', []): |
|
461 | 463 | extrafields[field] = next(objs) |
|
462 | 464 | |
|
463 | 465 | if b'delta' in extrafields: |
|
464 | 466 | basenode = manifest[b'deltabasenode'] |
|
465 | 467 | delta = extrafields[b'delta'] |
|
466 | 468 | elif b'revision' in extrafields: |
|
467 | 469 | basenode = nullid |
|
468 | 470 | revision = extrafields[b'revision'] |
|
469 | 471 | delta = mdiff.trivialdiffheader(len(revision)) + revision |
|
470 | 472 | else: |
|
471 | 473 | continue |
|
472 | 474 | |
|
473 | 475 | yield ( |
|
474 | 476 | node, |
|
475 | 477 | manifest[b'parents'][0], |
|
476 | 478 | manifest[b'parents'][1], |
|
477 | 479 | # The value passed in is passed to the lookup function passed |
|
478 | 480 | # to addgroup(). We already have a map of manifest node to |
|
479 | 481 | # changelog revision number. So we just pass in the |
|
480 | 482 | # manifest node here and use linkrevs.__getitem__ as the |
|
481 | 483 | # resolution function. |
|
482 | 484 | node, |
|
483 | 485 | basenode, |
|
484 | 486 | delta, |
|
485 | 487 | # Flags not yet supported. |
|
486 | 488 | 0, |
|
487 | 489 | ) |
|
488 | 490 | |
|
489 | 491 | progress.increment() |
|
490 | 492 | |
|
491 | 493 | progress = repo.ui.makeprogress( |
|
492 | 494 | _(b'manifests'), unit=_(b'chunks'), total=len(fetchnodes) |
|
493 | 495 | ) |
|
494 | 496 | |
|
495 | 497 | commandmeta = remote.apidescriptor[b'commands'][b'manifestdata'] |
|
496 | 498 | batchsize = commandmeta.get(b'recommendedbatchsize', 10000) |
|
497 | 499 | # TODO make size configurable on client? |
|
498 | 500 | |
|
499 | 501 | # We send commands 1 at a time to the remote. This is not the most |
|
500 | 502 | # efficient because we incur a round trip at the end of each batch. |
|
501 | 503 | # However, the existing frame-based reactor keeps consuming server |
|
502 | 504 | # data in the background. And this results in response data buffering |
|
503 | 505 | # in memory. This can consume gigabytes of memory. |
|
504 | 506 | # TODO send multiple commands in a request once background buffering |
|
505 | 507 | # issues are resolved. |
|
506 | 508 | |
|
507 | 509 | added = [] |
|
508 | 510 | |
|
509 | 511 | for i in pycompat.xrange(0, len(fetchnodes), batchsize): |
|
510 | 512 | batch = [node for node in fetchnodes[i : i + batchsize]] |
|
511 | 513 | if not batch: |
|
512 | 514 | continue |
|
513 | 515 | |
|
514 | 516 | with remote.commandexecutor() as e: |
|
515 | 517 | objs = e.callcommand( |
|
516 | 518 | b'manifestdata', |
|
517 | 519 | { |
|
518 | 520 | b'tree': b'', |
|
519 | 521 | b'nodes': batch, |
|
520 | 522 | b'fields': {b'parents', b'revision'}, |
|
521 | 523 | b'haveparents': True, |
|
522 | 524 | }, |
|
523 | 525 | ).result() |
|
524 | 526 | |
|
525 | 527 | # Chomp off header object. |
|
526 | 528 | next(objs) |
|
527 | 529 | |
|
528 | 530 | def onchangeset(cl, node): |
|
529 | 531 | added.append(node) |
|
530 | 532 | |
|
531 | 533 | rootmanifest.addgroup( |
|
532 | 534 | iterrevisions(objs, progress), |
|
533 | 535 | linkrevs.__getitem__, |
|
534 | 536 | weakref.proxy(tr), |
|
535 | 537 | addrevisioncb=onchangeset, |
|
536 | 538 | duplicaterevisioncb=onchangeset, |
|
537 | 539 | ) |
|
538 | 540 | |
|
539 | 541 | progress.complete() |
|
540 | 542 | |
|
541 | 543 | return { |
|
542 | 544 | b'added': added, |
|
543 | 545 | b'linkrevs': linkrevs, |
|
544 | 546 | } |
|
545 | 547 | |
|
546 | 548 | |
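Note: the fetch loop above is a plain fixed-size batching pattern, one manifestdata command per slice of the node list. The slicing itself, stripped of the wire-protocol plumbing:

    def batches(items, size):
        # Yield consecutive slices of at most "size" items.
        for i in range(0, len(items), size):
            yield items[i:i + size]

    assert list(batches([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]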
|
547 | 549 | def _derivefilesfrommanifests(repo, matcher, manifestnodes): |
|
548 | 550 | """Determine what file nodes are relevant given a set of manifest nodes. |
|
549 | 551 | |
|
550 | 552 | Returns a dict mapping file paths to dicts of file node to first manifest |
|
551 | 553 | node. |
|
552 | 554 | """ |
|
553 | 555 | ml = repo.manifestlog |
|
554 | 556 | fnodes = collections.defaultdict(dict) |
|
555 | 557 | |
|
556 | 558 | progress = repo.ui.makeprogress( |
|
557 | 559 | _(b'scanning manifests'), total=len(manifestnodes) |
|
558 | 560 | ) |
|
559 | 561 | |
|
560 | 562 | with progress: |
|
561 | 563 | for manifestnode in manifestnodes: |
|
562 | 564 | m = ml.get(b'', manifestnode) |
|
563 | 565 | |
|
564 | 566 | # TODO this will pull in unwanted nodes because it takes the storage |
|
565 | 567 | # delta into consideration. What we really want is something that |
|
566 | 568 | # takes the delta between the manifest's parents. And ideally we |
|
567 | 569 | # would ignore file nodes that are known locally. For now, ignore |
|
568 | 570 | # both these limitations. This will result in incremental fetches |
|
569 | 571 | # requesting data we already have. So this is far from ideal. |
|
570 | 572 | md = m.readfast() |
|
571 | 573 | |
|
572 | 574 | for path, fnode in md.items(): |
|
573 | 575 | if matcher(path): |
|
574 | 576 | fnodes[path].setdefault(fnode, manifestnode) |
|
575 | 577 | |
|
576 | 578 | progress.increment() |
|
577 | 579 | |
|
578 | 580 | return fnodes |
|
579 | 581 | |
|
580 | 582 | |
|
581 | 583 | def _fetchfiles(repo, tr, remote, fnodes, linkrevs): |
|
582 | 584 | """Fetch file data from explicit file revisions.""" |
|
583 | 585 | |
|
584 | 586 | def iterrevisions(objs, progress): |
|
585 | 587 | for filerevision in objs: |
|
586 | 588 | node = filerevision[b'node'] |
|
587 | 589 | |
|
588 | 590 | extrafields = {} |
|
589 | 591 | |
|
590 | 592 | for field, size in filerevision.get(b'fieldsfollowing', []): |
|
591 | 593 | extrafields[field] = next(objs) |
|
592 | 594 | |
|
593 | 595 | if b'delta' in extrafields: |
|
594 | 596 | basenode = filerevision[b'deltabasenode'] |
|
595 | 597 | delta = extrafields[b'delta'] |
|
596 | 598 | elif b'revision' in extrafields: |
|
597 | 599 | basenode = nullid |
|
598 | 600 | revision = extrafields[b'revision'] |
|
599 | 601 | delta = mdiff.trivialdiffheader(len(revision)) + revision |
|
600 | 602 | else: |
|
601 | 603 | continue |
|
602 | 604 | |
|
603 | 605 | yield ( |
|
604 | 606 | node, |
|
605 | 607 | filerevision[b'parents'][0], |
|
606 | 608 | filerevision[b'parents'][1], |
|
607 | 609 | node, |
|
608 | 610 | basenode, |
|
609 | 611 | delta, |
|
610 | 612 | # Flags not yet supported. |
|
611 | 613 | 0, |
|
612 | 614 | ) |
|
613 | 615 | |
|
614 | 616 | progress.increment() |
|
615 | 617 | |
|
616 | 618 | progress = repo.ui.makeprogress( |
|
617 | 619 | _(b'files'), |
|
618 | 620 | unit=_(b'chunks'), |
|
619 | 621 | total=sum(len(v) for v in pycompat.itervalues(fnodes)), |
|
620 | 622 | ) |
|
621 | 623 | |
|
622 | 624 | # TODO make batch size configurable |
|
623 | 625 | batchsize = 10000 |
|
624 | 626 | fnodeslist = [x for x in sorted(fnodes.items())] |
|
625 | 627 | |
|
626 | 628 | for i in pycompat.xrange(0, len(fnodeslist), batchsize): |
|
627 | 629 | batch = [x for x in fnodeslist[i : i + batchsize]] |
|
628 | 630 | if not batch: |
|
629 | 631 | continue |
|
630 | 632 | |
|
631 | 633 | with remote.commandexecutor() as e: |
|
632 | 634 | fs = [] |
|
633 | 635 | locallinkrevs = {} |
|
634 | 636 | |
|
635 | 637 | for path, nodes in batch: |
|
636 | 638 | fs.append( |
|
637 | 639 | ( |
|
638 | 640 | path, |
|
639 | 641 | e.callcommand( |
|
640 | 642 | b'filedata', |
|
641 | 643 | { |
|
642 | 644 | b'path': path, |
|
643 | 645 | b'nodes': sorted(nodes), |
|
644 | 646 | b'fields': {b'parents', b'revision'}, |
|
645 | 647 | b'haveparents': True, |
|
646 | 648 | }, |
|
647 | 649 | ), |
|
648 | 650 | ) |
|
649 | 651 | ) |
|
650 | 652 | |
|
651 | 653 | locallinkrevs[path] = { |
|
652 | 654 | node: linkrevs[manifestnode] |
|
653 | 655 | for node, manifestnode in pycompat.iteritems(nodes) |
|
654 | 656 | } |
|
655 | 657 | |
|
656 | 658 | for path, f in fs: |
|
657 | 659 | objs = f.result() |
|
658 | 660 | |
|
659 | 661 | # Chomp off header objects. |
|
660 | 662 | next(objs) |
|
661 | 663 | |
|
662 | 664 | store = repo.file(path) |
|
663 | 665 | store.addgroup( |
|
664 | 666 | iterrevisions(objs, progress), |
|
665 | 667 | locallinkrevs[path].__getitem__, |
|
666 | 668 | weakref.proxy(tr), |
|
667 | 669 | ) |
|
668 | 670 | |
|
669 | 671 | |
|
670 | 672 | def _fetchfilesfromcsets( |
|
671 | 673 | repo, tr, remote, pathfilter, fnodes, csets, manlinkrevs, shallow=False |
|
672 | 674 | ): |
|
673 | 675 | """Fetch file data from explicit changeset revisions.""" |
|
674 | 676 | |
|
675 | 677 | def iterrevisions(objs, remaining, progress): |
|
676 | 678 | while remaining: |
|
677 | 679 | filerevision = next(objs) |
|
678 | 680 | |
|
679 | 681 | node = filerevision[b'node'] |
|
680 | 682 | |
|
681 | 683 | extrafields = {} |
|
682 | 684 | |
|
683 | 685 | for field, size in filerevision.get(b'fieldsfollowing', []): |
|
684 | 686 | extrafields[field] = next(objs) |
|
685 | 687 | |
|
686 | 688 | if b'delta' in extrafields: |
|
687 | 689 | basenode = filerevision[b'deltabasenode'] |
|
688 | 690 | delta = extrafields[b'delta'] |
|
689 | 691 | elif b'revision' in extrafields: |
|
690 | 692 | basenode = nullid |
|
691 | 693 | revision = extrafields[b'revision'] |
|
692 | 694 | delta = mdiff.trivialdiffheader(len(revision)) + revision |
|
693 | 695 | else: |
|
694 | 696 | continue |
|
695 | 697 | |
|
696 | 698 | if b'linknode' in filerevision: |
|
697 | 699 | linknode = filerevision[b'linknode'] |
|
698 | 700 | else: |
|
699 | 701 | linknode = node |
|
700 | 702 | |
|
701 | 703 | yield ( |
|
702 | 704 | node, |
|
703 | 705 | filerevision[b'parents'][0], |
|
704 | 706 | filerevision[b'parents'][1], |
|
705 | 707 | linknode, |
|
706 | 708 | basenode, |
|
707 | 709 | delta, |
|
708 | 710 | # Flags not yet supported. |
|
709 | 711 | 0, |
|
710 | 712 | ) |
|
711 | 713 | |
|
712 | 714 | progress.increment() |
|
713 | 715 | remaining -= 1 |
|
714 | 716 | |
|
715 | 717 | progress = repo.ui.makeprogress( |
|
716 | 718 | _(b'files'), |
|
717 | 719 | unit=_(b'chunks'), |
|
718 | 720 | total=sum(len(v) for v in pycompat.itervalues(fnodes)), |
|
719 | 721 | ) |
|
720 | 722 | |
|
721 | 723 | commandmeta = remote.apidescriptor[b'commands'][b'filesdata'] |
|
722 | 724 | batchsize = commandmeta.get(b'recommendedbatchsize', 50000) |
|
723 | 725 | |
|
724 | 726 | shallowfiles = repository.REPO_FEATURE_SHALLOW_FILE_STORAGE in repo.features |
|
725 | 727 | fields = {b'parents', b'revision'} |
|
726 | 728 | clrev = repo.changelog.rev |
|
727 | 729 | |
|
728 | 730 | # There are no guarantees that we'll have ancestor revisions if |
|
729 | 731 | # a) this repo has shallow file storage b) shallow data fetching is enabled. |
|
730 | 732 | # Force remote to not delta against possibly unknown revisions when these |
|
731 | 733 | # conditions hold. |
|
732 | 734 | haveparents = not (shallowfiles or shallow) |
|
733 | 735 | |
|
734 | 736 | # Similarly, we may not have calculated linkrevs for all incoming file |
|
735 | 737 | # revisions. Ask the remote to do work for us in this case. |
|
736 | 738 | if not haveparents: |
|
737 | 739 | fields.add(b'linknode') |
|
738 | 740 | |
|
739 | 741 | for i in pycompat.xrange(0, len(csets), batchsize): |
|
740 | 742 | batch = [x for x in csets[i : i + batchsize]] |
|
741 | 743 | if not batch: |
|
742 | 744 | continue |
|
743 | 745 | |
|
744 | 746 | with remote.commandexecutor() as e: |
|
745 | 747 | args = { |
|
746 | 748 | b'revisions': [ |
|
747 | 749 | {b'type': b'changesetexplicit', b'nodes': batch,} |
|
748 | 750 | ], |
|
749 | 751 | b'fields': fields, |
|
750 | 752 | b'haveparents': haveparents, |
|
751 | 753 | } |
|
752 | 754 | |
|
753 | 755 | if pathfilter: |
|
754 | 756 | args[b'pathfilter'] = pathfilter |
|
755 | 757 | |
|
756 | 758 | objs = e.callcommand(b'filesdata', args).result() |
|
757 | 759 | |
|
758 | 760 | # First object is an overall header. |
|
759 | 761 | overall = next(objs) |
|
760 | 762 | |
|
761 | 763 | # We have overall['totalpaths'] segments. |
|
762 | 764 | for i in pycompat.xrange(overall[b'totalpaths']): |
|
763 | 765 | header = next(objs) |
|
764 | 766 | |
|
765 | 767 | path = header[b'path'] |
|
766 | 768 | store = repo.file(path) |
|
767 | 769 | |
|
768 | 770 | linkrevs = { |
|
769 | 771 | fnode: manlinkrevs[mnode] |
|
770 | 772 | for fnode, mnode in pycompat.iteritems(fnodes[path]) |
|
771 | 773 | } |
|
772 | 774 | |
|
773 | 775 | def getlinkrev(node): |
|
774 | 776 | if node in linkrevs: |
|
775 | 777 | return linkrevs[node] |
|
776 | 778 | else: |
|
777 | 779 | return clrev(node) |
|
778 | 780 | |
|
779 | 781 | store.addgroup( |
|
780 | 782 | iterrevisions(objs, header[b'totalitems'], progress), |
|
781 | 783 | getlinkrev, |
|
782 | 784 | weakref.proxy(tr), |
|
783 | 785 | maybemissingparents=shallow, |
|
784 | 786 | ) |
@@ -1,943 +1,936 b'' | |||
|
1 | 1 | """ Mercurial phases support code |
|
2 | 2 | |
|
3 | 3 | --- |
|
4 | 4 | |
|
5 | 5 | Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org> |
|
6 | 6 | Logilab SA <contact@logilab.fr> |
|
7 | 7 | Augie Fackler <durin42@gmail.com> |
|
8 | 8 | |
|
9 | 9 | This software may be used and distributed according to the terms |
|
10 | 10 | of the GNU General Public License version 2 or any later version. |
|
11 | 11 | |
|
12 | 12 | --- |
|
13 | 13 | |
|
14 | 14 | This module implements most phase logic in mercurial. |
|
15 | 15 | |
|
16 | 16 | |
|
17 | 17 | Basic Concept |
|
18 | 18 | ============= |
|
19 | 19 | |
|
20 | 20 | A 'changeset phase' is an indicator that tells us how a changeset is |
|
21 | 21 | manipulated and communicated. The details of each phase are described |

22 | 22 | below; here we describe the properties they have in common. |
|
23 | 23 | |
|
24 | 24 | Like bookmarks, phases are not stored in history and thus are not |
|
25 | 25 | permanent and leave no audit trail. |
|
26 | 26 | |
|
27 | 27 | First, no changeset can be in two phases at once. Phases are ordered, |
|
28 | 28 | so they can be considered from lowest to highest. The default, lowest |
|
29 | 29 | phase is 'public' - this is the normal phase of existing changesets. A |
|
30 | 30 | child changeset can not be in a lower phase than its parents. |
|
31 | 31 | |
|
32 | 32 | These phases share a hierarchy of traits: |
|
33 | 33 | |
|
34 | 34 | immutable shared |
|
35 | 35 | public: X X |
|
36 | 36 | draft: X |
|
37 | 37 | secret: |
|
38 | 38 | |
|
39 | 39 | Local commits are draft by default. |
|
40 | 40 | |
|
41 | 41 | Phase Movement and Exchange |
|
42 | 42 | =========================== |
|
43 | 43 | |
|
44 | 44 | Phase data is exchanged by pushkey on pull and push. Some servers have |
|
45 | 45 | a publish option set; we call such a server a "publishing server". |
|
46 | 46 | Pushing a draft changeset to a publishing server changes the phase to |
|
47 | 47 | public. |
|
48 | 48 | |
|
49 | 49 | A small list of facts/rules defines the exchange of phases: |
|
50 | 50 | |
|
51 | 51 | * old client never changes server states |
|
52 | 52 | * pull never changes server states |
|
53 | 53 | * publish and old server changesets are seen as public by client |
|
54 | 54 | * any secret changeset seen in another repository is lowered to at |
|
55 | 55 | least draft |
|
56 | 56 | |
|
57 | 57 | Here is the final table summing up the 49 possible use cases of phase |
|
58 | 58 | exchange: |
|
59 | 59 | |
|
60 | 60 | server |
|
61 | 61 | old publish non-publish |
|
62 | 62 | N X N D P N D P |
|
63 | 63 | old client |
|
64 | 64 | pull |
|
65 | 65 | N - X/X - X/D X/P - X/D X/P |
|
66 | 66 | X - X/X - X/D X/P - X/D X/P |
|
67 | 67 | push |
|
68 | 68 | X X/X X/X X/P X/P X/P X/D X/D X/P |
|
69 | 69 | new client |
|
70 | 70 | pull |
|
71 | 71 | N - P/X - P/D P/P - D/D P/P |
|
72 | 72 | D - P/X - P/D P/P - D/D P/P |
|
73 | 73 | P - P/X - P/D P/P - P/D P/P |
|
74 | 74 | push |
|
75 | 75 | D P/X P/X P/P P/P P/P D/D D/D P/P |
|
76 | 76 | P P/X P/X P/P P/P P/P P/P P/P P/P |
|
77 | 77 | |
|
78 | 78 | Legend: |
|
79 | 79 | |
|
80 | 80 | A/B = final state on client / state on server |
|
81 | 81 | |
|
82 | 82 | * N = new/not present, |
|
83 | 83 | * P = public, |
|
84 | 84 | * D = draft, |
|
85 | 85 | * X = not tracked (i.e., the old client or server has no internal |
|
86 | 86 | way of recording the phase.) |
|
87 | 87 | |
|
88 | 88 | passive = only pushes |
|
89 | 89 | |
|
90 | 90 | |
|
91 | 91 | A cell here can be read like this: |
|
92 | 92 | |
|
93 | 93 | "When a new client pushes a draft changeset (D) to a publishing |
|
94 | 94 | server where it's not present (N), it's marked public on both |
|
95 | 95 | sides (P/P)." |
|
96 | 96 | |
|
97 | 97 | Note: old clients behave as a publishing server with draft-only content |
|
98 | 98 | - other people see it as public |
|
99 | 99 | - content is pushed as draft |
|
100 | 100 | |
|
101 | 101 | """ |
|
102 | 102 | |
|
103 | 103 | from __future__ import absolute_import |
|
104 | 104 | |
|
105 | 105 | import errno |
|
106 | 106 | import struct |
|
107 | 107 | |
|
108 | 108 | from .i18n import _ |
|
109 | 109 | from .node import ( |
|
110 | 110 | bin, |
|
111 | 111 | hex, |
|
112 | 112 | nullid, |
|
113 | 113 | nullrev, |
|
114 | 114 | short, |
|
115 | 115 | wdirrev, |
|
116 | 116 | ) |
|
117 | 117 | from .pycompat import ( |
|
118 | 118 | getattr, |
|
119 | 119 | setattr, |
|
120 | 120 | ) |
|
121 | 121 | from . import ( |
|
122 | 122 | error, |
|
123 | 123 | pycompat, |
|
124 | 124 | requirements, |
|
125 | 125 | smartset, |
|
126 | 126 | txnutil, |
|
127 | 127 | util, |
|
128 | 128 | ) |
|
129 | 129 | |
|
130 | 130 | _fphasesentry = struct.Struct(b'>i20s') |
|
131 | 131 | |
|
132 | 132 | # record phase index |
|
133 | 133 | public, draft, secret = range(3) |
|
134 | 134 | archived = 32 # non-continuous for compatibility |
|
135 | 135 | internal = 96 # non-continuous for compatibility |
|
136 | 136 | allphases = (public, draft, secret, archived, internal) |
|
137 | 137 | trackedphases = (draft, secret, archived, internal) |
|
138 | 138 | # record phase names |
|
139 | 139 | cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command |
|
140 | 140 | phasenames = dict(enumerate(cmdphasenames)) |
|
141 | 141 | phasenames[archived] = b'archived' |
|
142 | 142 | phasenames[internal] = b'internal' |
|
143 | 143 | # map phase name to phase number |
|
144 | 144 | phasenumber = {name: phase for phase, name in phasenames.items()} |
|
145 | 145 | # like phasenumber, but also map the numeric and bytestring forms of |

146 | 146 | # the phase number to the phase number |
|
147 | 147 | phasenumber2 = phasenumber.copy() |
|
148 | 148 | phasenumber2.update({phase: phase for phase in phasenames}) |
|
149 | 149 | phasenumber2.update({b'%i' % phase: phase for phase in phasenames}) |
|
150 | 150 | # record phase property |
|
151 | 151 | mutablephases = (draft, secret, archived, internal) |
|
152 | 152 | remotehiddenphases = (secret, archived, internal) |
|
153 | 153 | localhiddenphases = (internal, archived) |
|
154 | 154 | |
|
155 | 155 | |
|
156 | 156 | def supportinternal(repo): |
|
157 | 157 | """True if the internal phase can be used on a repository""" |
|
158 | 158 | return requirements.INTERNAL_PHASE_REQUIREMENT in repo.requirements |
|
159 | 159 | |
|
160 | 160 | |
|
161 | 161 | def _readroots(repo, phasedefaults=None): |
|
162 | 162 | """Read phase roots from disk |
|
163 | 163 | |
|
164 | 164 | phasedefaults is a list of fn(repo, roots) callable, which are |
|
165 | 165 | executed if the phase roots file does not exist. When phases are |
|
166 | 166 | being initialized on an existing repository, this could be used to |
|
167 | 167 | set selected changesets phase to something else than public. |
|
168 | 168 | |
|
169 | 169 | Return (roots, dirty) where dirty is true if roots differ from |
|
170 | 170 | what is being stored. |
|
171 | 171 | """ |
|
172 | 172 | repo = repo.unfiltered() |
|
173 | 173 | dirty = False |
|
174 | 174 | roots = {i: set() for i in allphases} |
|
175 | 175 | try: |
|
176 | 176 | f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots') |
|
177 | 177 | try: |
|
178 | 178 | for line in f: |
|
179 | 179 | phase, nh = line.split() |
|
180 | 180 | roots[int(phase)].add(bin(nh)) |
|
181 | 181 | finally: |
|
182 | 182 | f.close() |
|
183 | 183 | except IOError as inst: |
|
184 | 184 | if inst.errno != errno.ENOENT: |
|
185 | 185 | raise |
|
186 | 186 | if phasedefaults: |
|
187 | 187 | for f in phasedefaults: |
|
188 | 188 | roots = f(repo, roots) |
|
189 | 189 | dirty = True |
|
190 | 190 | return roots, dirty |
|
191 | 191 | |
|
192 | 192 | |
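Note: _readroots above implies the on-disk `phaseroots` format: one `<phase> <hex-node>` pair per line, as also written back by _write() further down. A standalone parser under that assumption:

    from binascii import unhexlify

    def parse_phaseroots(data):
        roots = {}
        for line in data.splitlines():
            phase, nh = line.split()  # b'<phase> <40-char hex node>'
            roots.setdefault(int(phase), set()).add(unhexlify(nh))
        return roots

    sample = b'1 ' + b'ab' * 20 + b'\n2 ' + b'cd' * 20
    assert parse_phaseroots(sample) == {1: {b'\xab' * 20}, 2: {b'\xcd' * 20}}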
|
193 | 193 | def binaryencode(phasemapping): |
|
194 | 194 | """encode a 'phase -> nodes' mapping into a binary stream |
|
195 | 195 | |
|
196 | 196 | The revision lists are encoded as (phase, root) pairs. |
|
197 | 197 | """ |
|
198 | 198 | binarydata = [] |
|
199 | 199 | for phase, nodes in pycompat.iteritems(phasemapping): |
|
200 | 200 | for head in nodes: |
|
201 | 201 | binarydata.append(_fphasesentry.pack(phase, head)) |
|
202 | 202 | return b''.join(binarydata) |
|
203 | 203 | |
|
204 | 204 | |
|
205 | 205 | def binarydecode(stream): |
|
206 | 206 | """decode a binary stream into a 'phase -> nodes' mapping |
|
207 | 207 | |
|
208 | 208 | The (phase, root) pairs are turned back into a dictionary with |
|
209 | 209 | the phase as index and the aggregated roots of that phase as value.""" |
|
210 | 210 | headsbyphase = {i: [] for i in allphases} |
|
211 | 211 | entrysize = _fphasesentry.size |
|
212 | 212 | while True: |
|
213 | 213 | entry = stream.read(entrysize) |
|
214 | 214 | if len(entry) < entrysize: |
|
215 | 215 | if entry: |
|
216 | 216 | raise error.Abort(_(b'bad phase-heads stream')) |
|
217 | 217 | break |
|
218 | 218 | phase, node = _fphasesentry.unpack(entry) |
|
219 | 219 | headsbyphase[phase].append(node) |
|
220 | 220 | return headsbyphase |
|
221 | 221 | |
|
222 | 222 | |
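Note: a quick round-trip of the `>i20s` entry format used by binaryencode/binarydecode above: each record is a big-endian 32-bit phase number followed by a 20-byte node, 24 bytes total:

    import struct

    entry = struct.Struct(b'>i20s')
    packed = entry.pack(1, b'\x11' * 20)  # one draft (phase 1) head
    assert entry.size == len(packed) == 24
    assert entry.unpack(packed) == (1, b'\x11' * 20)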
|
223 | 223 | def _sortedrange_insert(data, idx, rev, t): |
|
224 | 224 | merge_before = False |
|
225 | 225 | if idx: |
|
226 | 226 | r1, t1 = data[idx - 1] |
|
227 | 227 | merge_before = r1[-1] + 1 == rev and t1 == t |
|
228 | 228 | merge_after = False |
|
229 | 229 | if idx < len(data): |
|
230 | 230 | r2, t2 = data[idx] |
|
231 | 231 | merge_after = r2[0] == rev + 1 and t2 == t |
|
232 | 232 | |
|
233 | 233 | if merge_before and merge_after: |
|
234 | 234 | data[idx - 1] = (pycompat.xrange(r1[0], r2[-1] + 1), t) |
|
235 | 235 | data.pop(idx) |
|
236 | 236 | elif merge_before: |
|
237 | 237 | data[idx - 1] = (pycompat.xrange(r1[0], rev + 1), t) |
|
238 | 238 | elif merge_after: |
|
239 | 239 | data[idx] = (pycompat.xrange(rev, r2[-1] + 1), t) |
|
240 | 240 | else: |
|
241 | 241 | data.insert(idx, (pycompat.xrange(rev, rev + 1), t)) |
|
242 | 242 | |
|
243 | 243 | |
|
244 | 244 | def _sortedrange_split(data, idx, rev, t): |
|
245 | 245 | r1, t1 = data[idx] |
|
246 | 246 | if t == t1: |
|
247 | 247 | return |
|
248 | 248 | t = (t1[0], t[1]) |
|
249 | 249 | if len(r1) == 1: |
|
250 | 250 | data.pop(idx) |
|
251 | 251 | _sortedrange_insert(data, idx, rev, t) |
|
252 | 252 | elif r1[0] == rev: |
|
253 | 253 | data[idx] = (pycompat.xrange(rev + 1, r1[-1] + 1), t1) |
|
254 | 254 | _sortedrange_insert(data, idx, rev, t) |
|
255 | 255 | elif r1[-1] == rev: |
|
256 | 256 | data[idx] = (pycompat.xrange(r1[0], rev), t1) |
|
257 | 257 | _sortedrange_insert(data, idx + 1, rev, t) |
|
258 | 258 | else: |
|
259 | 259 | data[idx : idx + 1] = [ |
|
260 | 260 | (pycompat.xrange(r1[0], rev), t1), |
|
261 | 261 | (pycompat.xrange(rev, rev + 1), t), |
|
262 | 262 | (pycompat.xrange(rev + 1, r1[-1] + 1), t1), |
|
263 | 263 | ] |
|
264 | 264 | |
|
265 | 265 | |
|
266 | 266 | def _trackphasechange(data, rev, old, new): |
|
267 | 267 | """add a phase move to the <data> list of ranges |
|
268 | 268 | |
|
269 | 269 | If data is None, nothing happens. |
|
270 | 270 | """ |
|
271 | 271 | if data is None: |
|
272 | 272 | return |
|
273 | 273 | |
|
274 | 274 | # If data is empty, create a one-revision range and done |
|
275 | 275 | if not data: |
|
276 | 276 | data.insert(0, (pycompat.xrange(rev, rev + 1), (old, new))) |
|
277 | 277 | return |
|
278 | 278 | |
|
279 | 279 | low = 0 |
|
280 | 280 | high = len(data) |
|
281 | 281 | t = (old, new) |
|
282 | 282 | while low < high: |
|
283 | 283 | mid = (low + high) // 2 |
|
284 | 284 | revs = data[mid][0] |
|
285 | 285 | revs_low = revs[0] |
|
286 | 286 | revs_high = revs[-1] |
|
287 | 287 | |
|
288 | 288 | if rev >= revs_low and rev <= revs_high: |
|
289 | 289 | _sortedrange_split(data, mid, rev, t) |
|
290 | 290 | return |
|
291 | 291 | |
|
292 | 292 | if revs_low == rev + 1: |
|
293 | 293 | if mid and data[mid - 1][0][-1] == rev: |
|
294 | 294 | _sortedrange_split(data, mid - 1, rev, t) |
|
295 | 295 | else: |
|
296 | 296 | _sortedrange_insert(data, mid, rev, t) |
|
297 | 297 | return |
|
298 | 298 | |
|
299 | 299 | if revs_high == rev - 1: |
|
300 | 300 | if mid + 1 < len(data) and data[mid + 1][0][0] == rev: |
|
301 | 301 | _sortedrange_split(data, mid + 1, rev, t) |
|
302 | 302 | else: |
|
303 | 303 | _sortedrange_insert(data, mid + 1, rev, t) |
|
304 | 304 | return |
|
305 | 305 | |
|
306 | 306 | if revs_low > rev: |
|
307 | 307 | high = mid |
|
308 | 308 | else: |
|
309 | 309 | low = mid + 1 |
|
310 | 310 | |
|
311 | 311 | if low == len(data): |
|
312 | 312 | data.append((pycompat.xrange(rev, rev + 1), t)) |
|
313 | 313 | return |
|
314 | 314 | |
|
315 | 315 | r1, t1 = data[low] |
|
316 | 316 | if r1[0] > rev: |
|
317 | 317 | data.insert(low, (pycompat.xrange(rev, rev + 1), t)) |
|
318 | 318 | else: |
|
319 | 319 | data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t)) |
|
320 | 320 | |
|
321 | 321 | |
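Note: the _sortedrange helpers and _trackphasechange above keep the phase-movement log compact: adjacent revisions recording the same (old, new) move collapse into a single range. Assuming this module is importable as mercurial.phases, the behavior looks like:

    from mercurial import phases

    data = []
    for rev in (5, 6, 7):  # three adjacent moves from None to draft
        phases._trackphasechange(data, rev, None, phases.draft)

    # The three single-revision entries were merged into one range.
    assert len(data) == 1
    revs, move = data[0]
    assert list(revs) == [5, 6, 7] and move == (None, phases.draft)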
|
322 | 322 | class phasecache(object): |
|
323 | 323 | def __init__(self, repo, phasedefaults, _load=True): |
|
324 | 324 | if _load: |
|
325 | 325 | # Cheap trick to allow shallow-copy without copy module |
|
326 | 326 | self.phaseroots, self.dirty = _readroots(repo, phasedefaults) |
|
327 | 327 | self._loadedrevslen = 0 |
|
328 | 328 | self._phasesets = None |
|
329 | 329 | self.filterunknown(repo) |
|
330 | 330 | self.opener = repo.svfs |
|
331 | 331 | |
|
332 | 332 | def hasnonpublicphases(self, repo): |
|
333 | 333 | """detect if there are revisions with non-public phase""" |
|
334 | 334 | repo = repo.unfiltered() |
|
335 | 335 | cl = repo.changelog |
|
336 | 336 | if len(cl) >= self._loadedrevslen: |
|
337 | 337 | self.invalidate() |
|
338 | 338 | self.loadphaserevs(repo) |
|
339 | 339 | return any( |
|
340 | 340 | revs |
|
341 | 341 | for phase, revs in pycompat.iteritems(self.phaseroots) |
|
342 | 342 | if phase != public |
|
343 | 343 | ) |
|
344 | 344 | |
|
345 | 345 | def nonpublicphaseroots(self, repo): |
|
346 | 346 | """returns the roots of all non-public phases |
|
347 | 347 | |
|
348 | 348 | The roots are not minimized, so if the secret revisions are |
|
349 | 349 | descendants of draft revisions, their roots will still be present. |
|
350 | 350 | """ |
|
351 | 351 | repo = repo.unfiltered() |
|
352 | 352 | cl = repo.changelog |
|
353 | 353 | if len(cl) >= self._loadedrevslen: |
|
354 | 354 | self.invalidate() |
|
355 | 355 | self.loadphaserevs(repo) |
|
356 | 356 | return set().union( |
|
357 | 357 | *[ |
|
358 | 358 | revs |
|
359 | 359 | for phase, revs in pycompat.iteritems(self.phaseroots) |
|
360 | 360 | if phase != public |
|
361 | 361 | ] |
|
362 | 362 | ) |
|
363 | 363 | |
|
364 | 364 | def getrevset(self, repo, phases, subset=None): |
|
365 | 365 | """return a smartset for the given phases""" |
|
366 | 366 | self.loadphaserevs(repo) # ensure phase's sets are loaded |
|
367 | 367 | phases = set(phases) |
|
368 | 368 | publicphase = public in phases |
|
369 | 369 | |
|
370 | 370 | if publicphase: |
|
371 | 371 | # In this case, phases keeps all the *other* phases. |
|
372 | 372 | phases = set(allphases).difference(phases) |
|
373 | 373 | if not phases: |
|
374 | 374 | return smartset.fullreposet(repo) |
|
375 | 375 | |
|
376 | 376 | # fast path: _phasesets contains the interesting sets, |
|
377 | 377 | # might only need a union and post-filtering. |
|
378 | 378 | revsneedscopy = False |
|
379 | 379 | if len(phases) == 1: |
|
380 | 380 | [p] = phases |
|
381 | 381 | revs = self._phasesets[p] |
|
382 | 382 | revsneedscopy = True # Don't modify _phasesets |
|
383 | 383 | else: |
|
384 | 384 | # revs has the revisions in all *other* phases. |
|
385 | 385 | revs = set.union(*[self._phasesets[p] for p in phases]) |
|
386 | 386 | |
|
387 | 387 | def _addwdir(wdirsubset, wdirrevs): |
|
388 | 388 | if wdirrev in wdirsubset and repo[None].phase() in phases: |
|
389 | 389 | if revsneedscopy: |
|
390 | 390 | wdirrevs = wdirrevs.copy() |
|
391 | 391 | # The working dir would never be in the cache, but it was in |
|
392 | 392 | # the subset being filtered for its phase (or filtered out, |
|
393 | 393 | # depending on publicphase), so add it to the output to be |
|
394 | 394 | # included (or filtered out). |
|
395 | 395 | wdirrevs.add(wdirrev) |
|
396 | 396 | return wdirrevs |
|
397 | 397 | |
|
398 | 398 | if not publicphase: |
|
399 | 399 | if repo.changelog.filteredrevs: |
|
400 | 400 | revs = revs - repo.changelog.filteredrevs |
|
401 | 401 | |
|
402 | 402 | if subset is None: |
|
403 | 403 | return smartset.baseset(revs) |
|
404 | 404 | else: |
|
405 | 405 | revs = _addwdir(subset, revs) |
|
406 | 406 | return subset & smartset.baseset(revs) |
|
407 | 407 | else: |
|
408 | 408 | if subset is None: |
|
409 | 409 | subset = smartset.fullreposet(repo) |
|
410 | 410 | |
|
411 | 411 | revs = _addwdir(subset, revs) |
|
412 | 412 | |
|
413 | 413 | if not revs: |
|
414 | 414 | return subset |
|
415 | 415 | return subset.filter(lambda r: r not in revs) |
|
416 | 416 | |
|
417 | 417 | def copy(self): |
|
418 | 418 | # Shallow copy meant to ensure isolation in |
|
419 | 419 | # advance/retractboundary(), nothing more. |
|
420 | 420 | ph = self.__class__(None, None, _load=False) |
|
421 | 421 | ph.phaseroots = self.phaseroots.copy() |
|
422 | 422 | ph.dirty = self.dirty |
|
423 | 423 | ph.opener = self.opener |
|
424 | 424 | ph._loadedrevslen = self._loadedrevslen |
|
425 | 425 | ph._phasesets = self._phasesets |
|
426 | 426 | return ph |
|
427 | 427 | |
|
428 | 428 | def replace(self, phcache): |
|
429 | 429 | """replace all values in 'self' with content of phcache""" |
|
430 | 430 | for a in ( |
|
431 | 431 | b'phaseroots', |
|
432 | 432 | b'dirty', |
|
433 | 433 | b'opener', |
|
434 | 434 | b'_loadedrevslen', |
|
435 | 435 | b'_phasesets', |
|
436 | 436 | ): |
|
437 | 437 | setattr(self, a, getattr(phcache, a)) |
|
438 | 438 | |
|
439 | 439 | def _getphaserevsnative(self, repo): |
|
440 | 440 | repo = repo.unfiltered() |
|
441 | 441 | return repo.changelog.computephases(self.phaseroots) |
|
442 | 442 | |
|
443 | 443 | def _computephaserevspure(self, repo): |
|
444 | 444 | repo = repo.unfiltered() |
|
445 | 445 | cl = repo.changelog |
|
446 | 446 | self._phasesets = {phase: set() for phase in allphases} |
|
447 | 447 | lowerroots = set() |
|
448 | 448 | for phase in reversed(trackedphases): |
|
449 | 449 | roots = pycompat.maplist(cl.rev, self.phaseroots[phase]) |
|
450 | 450 | if roots: |
|
451 | 451 | ps = set(cl.descendants(roots)) |
|
452 | 452 | for root in roots: |
|
453 | 453 | ps.add(root) |
|
454 | 454 | ps.difference_update(lowerroots) |
|
455 | 455 | lowerroots.update(ps) |
|
456 | 456 | self._phasesets[phase] = ps |
|
457 | 457 | self._loadedrevslen = len(cl) |
|
458 | 458 | |
|
459 | 459 | def loadphaserevs(self, repo): |
|
460 | 460 | """ensure phase information is loaded in the object""" |
|
461 | 461 | if self._phasesets is None: |
|
462 | 462 | try: |
|
463 | 463 | res = self._getphaserevsnative(repo) |
|
464 | 464 | self._loadedrevslen, self._phasesets = res |
|
465 | 465 | except AttributeError: |
|
466 | 466 | self._computephaserevspure(repo) |
|
467 | 467 | |
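The try/except above is an EAFP feature probe: the native (C) helper is simply
called, and a missing attribute routes execution to the pure-Python fallback.
A self-contained sketch of the same pattern, with made-up return values:

    class OldIndex(object):
        pass                                   # no native computephases()

    class NewIndex(object):
        def computephases(self, roots):
            return 42, {}                      # pretend C-accelerated result

    def load(index, roots):
        try:
            return index.computephases(roots)  # fast path
        except AttributeError:
            return 0, {}                       # pure-Python fallback

    print(load(NewIndex(), {}))  # (42, {})
    print(load(OldIndex(), {}))  # (0, {})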
|
468 | 468 | def invalidate(self): |
|
469 | 469 | self._loadedrevslen = 0 |
|
470 | 470 | self._phasesets = None |
|
471 | 471 | |
|
472 | 472 | def phase(self, repo, rev): |
|
473 | 473 | # We need a repo argument here to be able to build _phasesets |
|
474 | 474 | # if necessary. The repository instance is not stored in |
|
475 | 475 | # phasecache to avoid reference cycles. The changelog instance |
|
476 | 476 | # is not stored because it is a filecache() property and can |
|
477 | 477 | # be replaced without us being notified. |
|
478 | 478 | if rev == nullrev: |
|
479 | 479 | return public |
|
480 | 480 | if rev < nullrev: |
|
481 | 481 | raise ValueError(_(b'cannot lookup negative revision')) |
|
482 | 482 | if rev >= self._loadedrevslen: |
|
483 | 483 | self.invalidate() |
|
484 | 484 | self.loadphaserevs(repo) |
|
485 | 485 | for phase in trackedphases: |
|
486 | 486 | if rev in self._phasesets[phase]: |
|
487 | 487 | return phase |
|
488 | 488 | return public |
|
489 | 489 | |
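Note the lookup order in phase(): only the non-public sets are ever stored, so
a revision found in none of them is public by definition. A tiny illustrative
sketch with made-up revision numbers (draft=1, secret=2):

    trackedphases = [1, 2]             # draft, secret
    _phasesets = {1: {4, 5}, 2: {6}}   # made-up contents

    def phase_of(rev):
        for p in trackedphases:
            if rev in _phasesets[p]:
                return p
        return 0                        # public: the implicit default

    print([phase_of(r) for r in range(7)])  # [0, 0, 0, 0, 1, 1, 2]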
|
490 | 490 | def write(self): |
|
491 | 491 | if not self.dirty: |
|
492 | 492 | return |
|
493 | 493 | f = self.opener(b'phaseroots', b'w', atomictemp=True, checkambig=True) |
|
494 | 494 | try: |
|
495 | 495 | self._write(f) |
|
496 | 496 | finally: |
|
497 | 497 | f.close() |
|
498 | 498 | |
|
499 | 499 | def _write(self, fp): |
|
500 | 500 | for phase, roots in pycompat.iteritems(self.phaseroots): |
|
501 | 501 | for h in sorted(roots): |
|
502 | 502 | fp.write(b'%i %s\n' % (phase, hex(h))) |
|
503 | 503 | self.dirty = False |
|
504 | 504 | |
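_write() defines the on-disk phaseroots format: one "<phase number> <hex root
node>" pair per line. A hedged round-trip sketch for that format (this reader
is illustrative, not Mercurial's actual parser, and the hashes are made up):

    sample = (
        b'1 1f0dee641bb7258c56bd60e93edfa2405381c41e\n'  # a draft root
        b'2 a21ccf4412b5b84b4ff0133eff6571a3dcbcc5cc\n'  # a secret root
    )

    def parseroots(data):
        roots = {}
        for line in data.splitlines():
            phase, node = line.split(b' ', 1)
            roots.setdefault(int(phase), set()).add(node)
        return roots

    print(parseroots(sample))  # {1: {b'1f0d...'}, 2: {b'a21c...'}}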
|
505 | 505 | def _updateroots(self, phase, newroots, tr): |
|
506 | 506 | self.phaseroots[phase] = newroots |
|
507 | 507 | self.invalidate() |
|
508 | 508 | self.dirty = True |
|
509 | 509 | |
|
510 | 510 | tr.addfilegenerator(b'phase', (b'phaseroots',), self._write) |
|
511 | 511 | tr.hookargs[b'phases_moved'] = b'1' |
|
512 | 512 | |
|
513 |     | def registernew(self, repo, tr, targetphase, nodes, revs=None):

514 |     | if revs is None:

515 |     | revs = []

    | 513 | def registernew(self, repo, tr, targetphase, revs):

516 | 514 | repo = repo.unfiltered()

517 |     | self._retractboundary(repo, tr, targetphase, nodes, revs=revs)

    | 515 | self._retractboundary(repo, tr, targetphase, [], revs=revs)

518 | 516 | if tr is not None and b'phases' in tr.changes:

519 | 517 | phasetracking = tr.changes[b'phases']

520 |     | torev = repo.changelog.rev

521 | 518 | phase = self.phase

522 |     | revs = [torev(node) for node in nodes] + [r for r in revs]

523 |     | revs.sort()

524 |     | for rev in revs:

    | 519 | for rev in sorted(revs):
|
525 | 520 | revphase = phase(repo, rev) |
|
526 | 521 | _trackphasechange(phasetracking, rev, None, revphase) |
|
527 | 522 | repo.invalidatevolatilesets() |
|
528 | 523 | |
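After this change the method takes revision numbers instead of nodes. A hedged
usage sketch mirroring the module-level wrapper further down ("repo", "tr" and
the choice of the draft phase are assumptions about the calling context):

    phcache = repo._phasecache.copy()
    phcache.registernew(repo, tr, draft, {repo[b'tip'].rev()})
    repo._phasecache.replace(phcache)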
|
529 | 524 | def advanceboundary( |
|
530 | 525 | self, repo, tr, targetphase, nodes, revs=None, dryrun=None |
|
531 | 526 | ): |
|
532 | 527 | """Set all 'nodes' to phase 'targetphase' |
|
533 | 528 | |
|
534 | 529 | Nodes with a phase lower than 'targetphase' are not affected. |
|
535 | 530 | |
|
536 | 531 | If dryrun is True, no actions will be performed |
|
537 | 532 | |
|
538 | 533 | Returns a set of revs whose phase is changed or should be changed |
|
539 | 534 | """ |
|
540 | 535 | # Be careful to preserve shallow-copied values: do not update |
|
541 | 536 | # phaseroots values, replace them. |
|
542 | 537 | if revs is None: |
|
543 | 538 | revs = [] |
|
544 | 539 | if tr is None: |
|
545 | 540 | phasetracking = None |
|
546 | 541 | else: |
|
547 | 542 | phasetracking = tr.changes.get(b'phases') |
|
548 | 543 | |
|
549 | 544 | repo = repo.unfiltered() |
|
550 | 545 | revs = [repo[n].rev() for n in nodes] + [r for r in revs] |
|
551 | 546 | |
|
552 | 547 | changes = set() # set of revisions to be changed |
|
553 | 548 | delroots = [] # roots deleted by this operation
|
554 | 549 | for phase in (phase for phase in allphases if phase > targetphase): |
|
555 | 550 | # filter nodes that are not in a compatible phase already |
|
556 | 551 | revs = [rev for rev in revs if self.phase(repo, rev) >= phase] |
|
557 | 552 | if not revs: |
|
558 | 553 | break # no roots to move anymore |
|
559 | 554 | |
|
560 | 555 | olds = self.phaseroots[phase] |
|
561 | 556 | |
|
562 | 557 | affected = repo.revs(b'%ln::%ld', olds, revs) |
|
563 | 558 | changes.update(affected) |
|
564 | 559 | if dryrun: |
|
565 | 560 | continue |
|
566 | 561 | for r in affected: |
|
567 | 562 | _trackphasechange( |
|
568 | 563 | phasetracking, r, self.phase(repo, r), targetphase |
|
569 | 564 | ) |
|
570 | 565 | |
|
571 | 566 | roots = { |
|
572 | 567 | ctx.node() |
|
573 | 568 | for ctx in repo.set(b'roots((%ln::) - %ld)', olds, affected) |
|
574 | 569 | } |
|
575 | 570 | if olds != roots: |
|
576 | 571 | self._updateroots(phase, roots, tr) |
|
577 | 572 | # some roots may need to be declared for lower phases |
|
578 | 573 | delroots.extend(olds - roots) |
|
579 | 574 | if not dryrun: |
|
580 | 575 | # declare deleted roots in the target phase
|
581 | 576 | if targetphase != 0: |
|
582 | 577 | self._retractboundary(repo, tr, targetphase, delroots) |
|
583 | 578 | repo.invalidatevolatilesets() |
|
584 | 579 | return changes |
|
585 | 580 | |
|
586 | 581 | def retractboundary(self, repo, tr, targetphase, nodes): |
|
587 | 582 | oldroots = { |
|
588 | 583 | phase: revs |
|
589 | 584 | for phase, revs in pycompat.iteritems(self.phaseroots) |
|
590 | 585 | if phase <= targetphase |
|
591 | 586 | } |
|
592 | 587 | if tr is None: |
|
593 | 588 | phasetracking = None |
|
594 | 589 | else: |
|
595 | 590 | phasetracking = tr.changes.get(b'phases') |
|
596 | 591 | repo = repo.unfiltered() |
|
597 | 592 | if ( |
|
598 | 593 | self._retractboundary(repo, tr, targetphase, nodes) |
|
599 | 594 | and phasetracking is not None |
|
600 | 595 | ): |
|
601 | 596 | |
|
602 | 597 | # find the affected revisions |
|
603 | 598 | new = self.phaseroots[targetphase] |
|
604 | 599 | old = oldroots[targetphase] |
|
605 | 600 | affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old)) |
|
606 | 601 | |
|
607 | 602 | # find the phase of the affected revision |
|
608 | 603 | for phase in pycompat.xrange(targetphase, -1, -1): |
|
609 | 604 | if phase: |
|
610 | 605 | roots = oldroots.get(phase, []) |
|
611 | 606 | revs = set(repo.revs(b'%ln::%ld', roots, affected)) |
|
612 | 607 | affected -= revs |
|
613 | 608 | else: # public phase |
|
614 | 609 | revs = affected |
|
615 | 610 | for r in sorted(revs): |
|
616 | 611 | _trackphasechange(phasetracking, r, phase, targetphase) |
|
617 | 612 | repo.invalidatevolatilesets() |
|
618 | 613 | |
|
619 | 614 | def _retractboundary(self, repo, tr, targetphase, nodes, revs=None): |
|
620 | 615 | # Be careful to preserve shallow-copied values: do not update |
|
621 | 616 | # phaseroots values, replace them. |
|
622 | 617 | if revs is None: |
|
623 | 618 | revs = [] |
|
624 | 619 | if targetphase in (archived, internal) and not supportinternal(repo): |
|
625 | 620 | name = phasenames[targetphase] |
|
626 | 621 | msg = b'this repository does not support the %s phase' % name |
|
627 | 622 | raise error.ProgrammingError(msg) |
|
628 | 623 | |
|
629 | 624 | repo = repo.unfiltered() |
|
630 | 625 | torev = repo.changelog.rev |
|
631 | 626 | tonode = repo.changelog.node |
|
632 | 627 | currentroots = {torev(node) for node in self.phaseroots[targetphase]} |
|
633 | 628 | finalroots = oldroots = set(currentroots) |
|
634 | 629 | newroots = [torev(node) for node in nodes] + [r for r in revs] |
|
635 | 630 | newroots = [ |
|
636 | 631 | rev for rev in newroots if self.phase(repo, rev) < targetphase |
|
637 | 632 | ] |
|
638 | 633 | |
|
639 | 634 | if newroots: |
|
640 | 635 | if nullrev in newroots: |
|
641 | 636 | raise error.Abort(_(b'cannot change null revision phase')) |
|
642 | 637 | currentroots.update(newroots) |
|
643 | 638 | |
|
644 | 639 | # Only compute new roots for revs above the roots that are being |
|
645 | 640 | # retracted. |
|
646 | 641 | minnewroot = min(newroots) |
|
647 | 642 | aboveroots = [rev for rev in currentroots if rev >= minnewroot] |
|
648 | 643 | updatedroots = repo.revs(b'roots(%ld::)', aboveroots) |
|
649 | 644 | |
|
650 | 645 | finalroots = {rev for rev in currentroots if rev < minnewroot} |
|
651 | 646 | finalroots.update(updatedroots) |
|
652 | 647 | if finalroots != oldroots: |
|
653 | 648 | self._updateroots( |
|
654 | 649 | targetphase, {tonode(rev) for rev in finalroots}, tr |
|
655 | 650 | ) |
|
656 | 651 | return True |
|
657 | 652 | return False |
|
658 | 653 | |
|
659 | 654 | def filterunknown(self, repo): |
|
660 | 655 | """remove unknown nodes from the phase boundary |
|
661 | 656 | |
|
662 | 657 | Nothing is lost as unknown nodes only hold data for their descendants. |
|
663 | 658 | """ |
|
664 | 659 | filtered = False |
|
665 | 660 | has_node = repo.changelog.index.has_node # to filter unknown nodes |
|
666 | 661 | for phase, nodes in pycompat.iteritems(self.phaseroots): |
|
667 | 662 | missing = sorted(node for node in nodes if not has_node(node)) |
|
668 | 663 | if missing: |
|
669 | 664 | for mnode in missing: |
|
670 | 665 | repo.ui.debug( |
|
671 | 666 | b'removing unknown node %s from %i-phase boundary\n' |
|
672 | 667 | % (short(mnode), phase) |
|
673 | 668 | ) |
|
674 | 669 | nodes.symmetric_difference_update(missing) |
|
675 | 670 | filtered = True |
|
676 | 671 | if filtered: |
|
677 | 672 | self.dirty = True |
|
678 | 673 | # filterunknown is called by repo.destroyed; we may have no changes in

679 | 674 | # the roots, but the _phasesets contents are certainly invalid (or at

680 | 675 | # least we have no proper way to check that). Related to issue 3858.

681 | 676 | #

682 | 677 | # The other caller is __init__, which has no _phasesets initialized

683 | 678 | # anyway. If this changes, we should consider adding a dedicated

684 | 679 | # "destroyed" function to phasecache, or a proper cache key mechanism

685 | 680 | # (see the branchmap one).
|
686 | 681 | self.invalidate() |
|
687 | 682 | |
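A side note on the set trick above: because "missing" is always a subset of
"nodes", symmetric_difference_update() behaves exactly like bulk removal:

    nodes = {b'a', b'b', b'c'}
    nodes.symmetric_difference_update({b'b'})
    print(nodes)  # {b'a', b'c'} -- same as difference_update() for a subset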
|
688 | 683 | |
|
689 | 684 | def advanceboundary(repo, tr, targetphase, nodes, revs=None, dryrun=None): |
|
690 | 685 | """Add nodes to a phase, changing other nodes' phases if necessary.

691 | 686 |

692 | 687 | This function moves the boundary *forward*: all nodes are set to the

693 | 688 | target phase or kept in a *lower* phase.

694 | 689 |

695 | 690 | The boundary is simplified to contain phase roots only.
|
696 | 691 | |
|
697 | 692 | If dryrun is True, no actions will be performed |
|
698 | 693 | |
|
699 | 694 | Returns a set of revs whose phase is changed or should be changed |
|
700 | 695 | """ |
|
701 | 696 | if revs is None: |
|
702 | 697 | revs = [] |
|
703 | 698 | phcache = repo._phasecache.copy() |
|
704 | 699 | changes = phcache.advanceboundary( |
|
705 | 700 | repo, tr, targetphase, nodes, revs=revs, dryrun=dryrun |
|
706 | 701 | ) |
|
707 | 702 | if not dryrun: |
|
708 | 703 | repo._phasecache.replace(phcache) |
|
709 | 704 | return changes |
|
710 | 705 | |
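Together with copy()/replace() above, this wrapper gives callers transactional
semantics: the shared cache is only swapped in once the boundary move has
succeeded. A hedged usage sketch (the transaction name is made up):

    with repo.transaction(b'example-publish') as tr:
        # make the current tip (and thus its ancestors) public
        advanceboundary(repo, tr, public, [repo[b'tip'].node()])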
|
711 | 706 | |
|
712 | 707 | def retractboundary(repo, tr, targetphase, nodes): |
|
713 | 708 | """Set nodes back to a phase, changing other nodes' phases if

714 | 709 | necessary.

715 | 710 |

716 | 711 | This function moves the boundary *backward*: all nodes are set to the

717 | 712 | target phase or kept in a *higher* phase.

718 | 713 |

719 | 714 | The boundary is simplified to contain phase roots only."""
|
720 | 715 | phcache = repo._phasecache.copy() |
|
721 | 716 | phcache.retractboundary(repo, tr, targetphase, nodes) |
|
722 | 717 | repo._phasecache.replace(phcache) |
|
723 | 718 | |
|
724 | 719 | |
|
725 |     | def registernew(repo, tr, targetphase, nodes, revs=None):

    | 720 | def registernew(repo, tr, targetphase, revs):

726 | 721 | """register a new revision and its phase

727 | 722 |

728 | 723 | Code adding revisions to the repository should use this function to

729 | 724 | set new changesets in their target phase (or higher).

730 | 725 | """

731 |     | if revs is None:

732 |     | revs = []

733 | 726 | phcache = repo._phasecache.copy()

734 |     | phcache.registernew(repo, tr, targetphase, nodes, revs=revs)

    | 727 | phcache.registernew(repo, tr, targetphase, revs)
|
735 | 728 | repo._phasecache.replace(phcache) |
|
736 | 729 | |
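A hedged caller sketch for the updated signature; newcommitphase() (defined
below) resolves the configured target phase, and "repo"/"tr" are assumed to
come from the surrounding commit machinery:

    registernew(repo, tr, newcommitphase(repo.ui), {repo[b'tip'].rev()})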
|
737 | 730 | |
|
738 | 731 | def listphases(repo): |
|
739 | 732 | """List phases root for serialization over pushkey""" |
|
740 | 733 | # Use an ordered dictionary so the behavior is deterministic.
|
741 | 734 | keys = util.sortdict() |
|
742 | 735 | value = b'%i' % draft |
|
743 | 736 | cl = repo.unfiltered().changelog |
|
744 | 737 | for root in repo._phasecache.phaseroots[draft]: |
|
745 | 738 | if repo._phasecache.phase(repo, cl.rev(root)) <= draft: |
|
746 | 739 | keys[hex(root)] = value |
|
747 | 740 | |
|
748 | 741 | if repo.publishing(): |
|
749 | 742 | # Add an extra data to let remote know we are a publishing |
|
750 | 743 | # repo. Publishing repo can't just pretend they are old repo. |
|
751 | 744 | # When pushing to a publishing repo, the client still need to |
|
752 | 745 | # push phase boundary |
|
753 | 746 | # |
|
754 | 747 | # Push do not only push changeset. It also push phase data. |
|
755 | 748 | # New phase data may apply to common changeset which won't be |
|
756 | 749 | # push (as they are common). Here is a very simple example: |
|
757 | 750 | # |
|
758 | 751 | # 1) repo A push changeset X as draft to repo B |
|
759 | 752 | # 2) repo B make changeset X public |
|
760 | 753 | # 3) repo B push to repo A. X is not pushed but the data that |
|
761 | 754 | # X as now public should |
|
762 | 755 | # |
|
763 | 756 | # The server can't handle it on it's own as it has no idea of |
|
764 | 757 | # client phase data. |
|
765 | 758 | keys[b'publishing'] = b'True' |
|
766 | 759 | return keys |
|
767 | 760 | |
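For illustration, the pushkey namespace built above for a publishing repository
with a single draft root would look roughly like this (the hash is made up):

    {
        b'1f0dee641bb7258c56bd60e93edfa2405381c41e': b'1',  # a draft root
        b'publishing': b'True',                             # publishing marker
    }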
|
768 | 761 | |
|
769 | 762 | def pushphase(repo, nhex, oldphasestr, newphasestr): |
|
770 | 763 | """Advance the phase of a node as requested over pushkey"""
|
771 | 764 | repo = repo.unfiltered() |
|
772 | 765 | with repo.lock(): |
|
773 | 766 | currentphase = repo[nhex].phase() |
|
774 | 767 | newphase = abs(int(newphasestr)) # let's avoid negative index surprise |
|
775 | 768 | oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise |
|
776 | 769 | if currentphase == oldphase and newphase < oldphase: |
|
777 | 770 | with repo.transaction(b'pushkey-phase') as tr: |
|
778 | 771 | advanceboundary(repo, tr, newphase, [bin(nhex)]) |
|
779 | 772 | return True |
|
780 | 773 | elif currentphase == newphase: |
|
781 | 774 | # raced, but got correct result |
|
782 | 775 | return True |
|
783 | 776 | else: |
|
784 | 777 | return False |
|
785 | 778 | |
|
786 | 779 | |
|
787 | 780 | def subsetphaseheads(repo, subset): |
|
788 | 781 | """Finds the phase heads for a subset of a history |
|
789 | 782 | |
|
790 | 783 | Returns a list indexed by phase number where each item is a list of phase |
|
791 | 784 | head nodes. |
|
792 | 785 | """ |
|
793 | 786 | cl = repo.changelog |
|
794 | 787 | |
|
795 | 788 | headsbyphase = {i: [] for i in allphases} |
|
796 | 789 | # No need to keep track of secret phase; any heads in the subset that |
|
797 | 790 | # are not mentioned are implicitly secret. |
|
798 | 791 | for phase in allphases[:secret]: |
|
799 | 792 | revset = b"heads(%%ln & %s())" % phasenames[phase] |
|
800 | 793 | headsbyphase[phase] = [cl.node(r) for r in repo.revs(revset, subset)] |
|
801 | 794 | return headsbyphase |
|
802 | 795 | |
|
803 | 796 | |
|
804 | 797 | def updatephases(repo, trgetter, headsbyphase): |
|
805 | 798 | """Updates the repo with the given phase heads""" |
|
806 | 799 | # Now advance phase boundaries of all phases |
|
807 | 800 | # |
|
808 | 801 | # run the update (and fetch transaction) only if there are actually things |
|
809 | 802 | # to update. This avoids creating an empty transaction during no-op operations.
|
810 | 803 | |
|
811 | 804 | for phase in allphases: |
|
812 | 805 | revset = b'%ln - _phase(%s)' |
|
813 | 806 | heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)] |
|
814 | 807 | if heads: |
|
815 | 808 | advanceboundary(repo, trgetter(), phase, heads) |
|
816 | 809 | |
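Because trgetter() is only called inside the "if heads:" branch, callers can
open the transaction lazily and a no-op phase sync never creates one. A hedged
caller sketch (the transaction name and surrounding names are assumptions):

    state = {'tr': None}

    def trgetter():
        if state['tr'] is None:
            state['tr'] = repo.transaction(b'phase-sync')
        return state['tr']

    updatephases(repo, trgetter, headsbyphase)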
|
817 | 810 | |
|
818 | 811 | def analyzeremotephases(repo, subset, roots): |
|
819 | 812 | """Compute phase heads and roots in a subset of nodes from a root dict

820 | 813 |

821 | 814 | * subset is the heads of the subset

822 | 815 | * roots is a {<nodeid> => phase} mapping; keys and values are strings.

823 | 816 |

824 | 817 | Accepts unknown elements in the input
|
825 | 818 | """ |
|
826 | 819 | repo = repo.unfiltered() |
|
827 | 820 | # build list from dictionary |
|
828 | 821 | draftroots = [] |
|
829 | 822 | has_node = repo.changelog.index.has_node # to filter unknown nodes |
|
830 | 823 | for nhex, phase in pycompat.iteritems(roots): |
|
831 | 824 | if nhex == b'publishing': # ignore data related to publish option |
|
832 | 825 | continue |
|
833 | 826 | node = bin(nhex) |
|
834 | 827 | phase = int(phase) |
|
835 | 828 | if phase == public: |
|
836 | 829 | if node != nullid: |
|
837 | 830 | repo.ui.warn( |
|
838 | 831 | _( |
|
839 | 832 | b'ignoring inconsistent public root' |
|
840 | 833 | b' from remote: %s\n' |
|
841 | 834 | ) |
|
842 | 835 | % nhex |
|
843 | 836 | ) |
|
844 | 837 | elif phase == draft: |
|
845 | 838 | if has_node(node): |
|
846 | 839 | draftroots.append(node) |
|
847 | 840 | else: |
|
848 | 841 | repo.ui.warn( |
|
849 | 842 | _(b'ignoring unexpected root from remote: %i %s\n') |
|
850 | 843 | % (phase, nhex) |
|
851 | 844 | ) |
|
852 | 845 | # compute heads |
|
853 | 846 | publicheads = newheads(repo, subset, draftroots) |
|
854 | 847 | return publicheads, draftroots |
|
855 | 848 | |
|
856 | 849 | |
|
857 | 850 | class remotephasessummary(object): |
|
858 | 851 | """summarize phase information on the remote side |
|
859 | 852 | |
|
860 | 853 | :publishing: True if the remote is publishing

861 | 854 | :publicheads: list of remote public phase heads (nodes)

862 | 855 | :draftheads: list of remote draft phase heads (nodes)

863 | 856 | :draftroots: list of remote draft phase roots (nodes)
|
864 | 857 | """ |
|
865 | 858 | |
|
866 | 859 | def __init__(self, repo, remotesubset, remoteroots): |
|
867 | 860 | unfi = repo.unfiltered() |
|
868 | 861 | self._allremoteroots = remoteroots |
|
869 | 862 | |
|
870 | 863 | self.publishing = remoteroots.get(b'publishing', False) |
|
871 | 864 | |
|
872 | 865 | ana = analyzeremotephases(repo, remotesubset, remoteroots) |
|
873 | 866 | self.publicheads, self.draftroots = ana |
|
874 | 867 | # Get the list of all "heads" revs draft on remote |
|
875 | 868 | dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset) |
|
876 | 869 | self.draftheads = [c.node() for c in dheads] |
|
877 | 870 | |
|
878 | 871 | |
|
879 | 872 | def newheads(repo, heads, roots): |
|
880 | 873 | """compute the new heads of a subset minus another

881 | 874 |

882 | 875 | * `heads`: defines the first subset

883 | 876 | * `roots`: defines the set we subtract from the first"""
|
884 | 877 | # prevent an import cycle |
|
885 | 878 | # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases |
|
886 | 879 | from . import dagop |
|
887 | 880 | |
|
888 | 881 | repo = repo.unfiltered() |
|
889 | 882 | cl = repo.changelog |
|
890 | 883 | rev = cl.index.get_rev |
|
891 | 884 | if not roots: |
|
892 | 885 | return heads |
|
893 | 886 | if not heads or heads == [nullid]: |
|
894 | 887 | return [] |
|
895 | 888 | # The logic operates on revisions; convert arguments early for convenience
|
896 | 889 | new_heads = {rev(n) for n in heads if n != nullid} |
|
897 | 890 | roots = [rev(n) for n in roots] |
|
898 | 891 | # compute the area we need to remove |
|
899 | 892 | affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads) |
|
900 | 893 | # heads in the area are no longer heads |
|
901 | 894 | new_heads.difference_update(affected_zone) |
|
902 | 895 | # revisions in the area may have children outside of it;

903 | 896 | # they might be new heads
|
904 | 897 | candidates = repo.revs( |
|
905 | 898 | b"parents(%ld + (%ld and merge())) and not null", roots, affected_zone |
|
906 | 899 | ) |
|
907 | 900 | candidates -= affected_zone |
|
908 | 901 | if new_heads or candidates: |
|
909 | 902 | # remove candidate that are ancestors of other heads |
|
910 | 903 | new_heads.update(candidates) |
|
911 | 904 | prunestart = repo.revs(b"parents(%ld) and not null", new_heads) |
|
912 | 905 | pruned = dagop.reachableroots(repo, candidates, prunestart) |
|
913 | 906 | new_heads.difference_update(pruned) |
|
914 | 907 | |
|
915 | 908 | return pycompat.maplist(cl.node, sorted(new_heads)) |
|
916 | 909 | |
|
917 | 910 | |
|
918 | 911 | def newcommitphase(ui): |
|
919 | 912 | """helper to get the target phase of new commit |
|
920 | 913 | |
|
921 | 914 | Handle all possible values for the phases.new-commit option.
|
922 | 915 | |
|
923 | 916 | """ |
|
924 | 917 | v = ui.config(b'phases', b'new-commit') |
|
925 | 918 | try: |
|
926 | 919 | return phasenumber2[v] |
|
927 | 920 | except KeyError: |
|
928 | 921 | raise error.ConfigError( |
|
929 | 922 | _(b"phases.new-commit: not a valid phase name ('%s')") % v |
|
930 | 923 | ) |
|
931 | 924 | |
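A hedged sketch of the config round trip: with "new-commit = secret" in the
[phases] section, newcommitphase() resolves the name through phasenumber2 to
the numeric phase:

    from mercurial import ui as uimod

    u = uimod.ui.load()
    u.setconfig(b'phases', b'new-commit', b'secret')
    print(newcommitphase(u))  # 2, the secret phase number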
|
932 | 925 | |
|
933 | 926 | def hassecret(repo): |
|
934 | 927 | """utility function that checks if a repo has any secret changesets."""
|
935 | 928 | return bool(repo._phasecache.phaseroots[secret]) |
|
936 | 929 | |
|
937 | 930 | |
|
938 | 931 | def preparehookargs(node, old, new): |
|
939 | 932 | if old is None: |
|
940 | 933 | old = b'' |
|
941 | 934 | else: |
|
942 | 935 | old = phasenames[old] |
|
943 | 936 | return {b'node': node, b'oldphase': old, b'phase': phasenames[new]} |