refactoring: use unlinkpath with ignoremissing
Mads Kiilerich
r18386:03442135 default

The requested changes are too big and the diff content was truncated.
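The diff below replaces explicit os.path.exists() guards around util.unlinkpath() with the helper's ignoremissing=True flag, once in the largefiles overrides and once in rebase. The following is a minimal, self-contained sketch of that pattern, not Mercurial's own implementation; the helper name and the repo.wjoin(f) path in the comments are illustrative only.

    import os
    import errno

    def unlinkpath_sketch(path, ignoremissing=False):
        """Remove path, optionally tolerating a file that is already gone."""
        try:
            os.unlink(path)
        except OSError as inst:
            # with ignoremissing, a missing file (ENOENT) is not an error
            if not (ignoremissing and inst.errno == errno.ENOENT):
                raise
        # the real util.unlinkpath additionally prunes now-empty parent
        # directories after the unlink

    # before (as removed in this diff):
    #     if os.path.exists(repo.wjoin(f)):
    #         util.unlinkpath(repo.wjoin(f))
    # after:
    #     util.unlinkpath(repo.wjoin(f), ignoremissing=True)

Folding the existence check into the helper shortens the call sites and avoids the small check-then-unlink race where the file could disappear between the two operations.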

@@ -1,1164 +1,1163 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 node, archival, error, merge, discovery
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22
23 23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24 24
25 25 def installnormalfilesmatchfn(manifest):
26 26 '''overrides scmutil.match so that the matcher it returns will ignore all
27 27 largefiles'''
28 28 oldmatch = None # for the closure
29 29 def overridematch(ctx, pats=[], opts={}, globbed=False,
30 30 default='relpath'):
31 31 match = oldmatch(ctx, pats, opts, globbed, default)
32 32 m = copy.copy(match)
33 33 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
34 34 manifest)
35 35 m._files = filter(notlfile, m._files)
36 36 m._fmap = set(m._files)
37 37 origmatchfn = m.matchfn
38 38 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
39 39 return m
40 40 oldmatch = installmatchfn(overridematch)
41 41
42 42 def installmatchfn(f):
43 43 oldmatch = scmutil.match
44 44 setattr(f, 'oldmatch', oldmatch)
45 45 scmutil.match = f
46 46 return oldmatch
47 47
48 48 def restorematchfn():
49 49 '''restores scmutil.match to what it was before installnormalfilesmatchfn
50 50 was called. no-op if scmutil.match is its original function.
51 51
52 52 Note that n calls to installnormalfilesmatchfn will require n calls to
53 53 restore matchfn to reverse'''
54 54 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
55 55
56 56 def addlargefiles(ui, repo, *pats, **opts):
57 57 large = opts.pop('large', None)
58 58 lfsize = lfutil.getminsize(
59 59 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
60 60
61 61 lfmatcher = None
62 62 if lfutil.islfilesrepo(repo):
63 63 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
64 64 if lfpats:
65 65 lfmatcher = match_.match(repo.root, '', list(lfpats))
66 66
67 67 lfnames = []
68 68 m = scmutil.match(repo[None], pats, opts)
69 69 m.bad = lambda x, y: None
70 70 wctx = repo[None]
71 71 for f in repo.walk(m):
72 72 exact = m.exact(f)
73 73 lfile = lfutil.standin(f) in wctx
74 74 nfile = f in wctx
75 75 exists = lfile or nfile
76 76
77 77 # Don't warn the user when they attempt to add a normal tracked file.
78 78 # The normal add code will do that for us.
79 79 if exact and exists:
80 80 if lfile:
81 81 ui.warn(_('%s already a largefile\n') % f)
82 82 continue
83 83
84 84 if (exact or not exists) and not lfutil.isstandin(f):
85 85 wfile = repo.wjoin(f)
86 86
87 87 # In case the file was removed previously, but not committed
88 88 # (issue3507)
89 89 if not os.path.exists(wfile):
90 90 continue
91 91
92 92 abovemin = (lfsize and
93 93 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
94 94 if large or abovemin or (lfmatcher and lfmatcher(f)):
95 95 lfnames.append(f)
96 96 if ui.verbose or not exact:
97 97 ui.status(_('adding %s as a largefile\n') % m.rel(f))
98 98
99 99 bad = []
100 100 standins = []
101 101
102 102 # Need to lock, otherwise there could be a race condition between
103 103 # when standins are created and added to the repo.
104 104 wlock = repo.wlock()
105 105 try:
106 106 if not opts.get('dry_run'):
107 107 lfdirstate = lfutil.openlfdirstate(ui, repo)
108 108 for f in lfnames:
109 109 standinname = lfutil.standin(f)
110 110 lfutil.writestandin(repo, standinname, hash='',
111 111 executable=lfutil.getexecutable(repo.wjoin(f)))
112 112 standins.append(standinname)
113 113 if lfdirstate[f] == 'r':
114 114 lfdirstate.normallookup(f)
115 115 else:
116 116 lfdirstate.add(f)
117 117 lfdirstate.write()
118 118 bad += [lfutil.splitstandin(f)
119 119 for f in repo[None].add(standins)
120 120 if f in m.files()]
121 121 finally:
122 122 wlock.release()
123 123 return bad
124 124
125 125 def removelargefiles(ui, repo, *pats, **opts):
126 126 after = opts.get('after')
127 127 if not pats and not after:
128 128 raise util.Abort(_('no files specified'))
129 129 m = scmutil.match(repo[None], pats, opts)
130 130 try:
131 131 repo.lfstatus = True
132 132 s = repo.status(match=m, clean=True)
133 133 finally:
134 134 repo.lfstatus = False
135 135 manifest = repo[None].manifest()
136 136 modified, added, deleted, clean = [[f for f in list
137 137 if lfutil.standin(f) in manifest]
138 138 for list in [s[0], s[1], s[3], s[6]]]
139 139
140 140 def warn(files, msg):
141 141 for f in files:
142 142 ui.warn(msg % m.rel(f))
143 143 return int(len(files) > 0)
144 144
145 145 result = 0
146 146
147 147 if after:
148 148 remove, forget = deleted, []
149 149 result = warn(modified + added + clean,
150 150 _('not removing %s: file still exists\n'))
151 151 else:
152 152 remove, forget = deleted + clean, []
153 153 result = warn(modified, _('not removing %s: file is modified (use -f'
154 154 ' to force removal)\n'))
155 155 result = warn(added, _('not removing %s: file has been marked for add'
156 156 ' (use forget to undo)\n')) or result
157 157
158 158 for f in sorted(remove + forget):
159 159 if ui.verbose or not m.exact(f):
160 160 ui.status(_('removing %s\n') % m.rel(f))
161 161
162 162 # Need to lock because standin files are deleted then removed from the
163 163 # repository and we could race in-between.
164 164 wlock = repo.wlock()
165 165 try:
166 166 lfdirstate = lfutil.openlfdirstate(ui, repo)
167 167 for f in remove:
168 168 if not after:
169 169 # If this is being called by addremove, notify the user that we
170 170 # are removing the file.
171 171 if getattr(repo, "_isaddremove", False):
172 172 ui.status(_('removing %s\n') % f)
173 if os.path.exists(repo.wjoin(f)):
174 util.unlinkpath(repo.wjoin(f))
173 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
175 174 lfdirstate.remove(f)
176 175 lfdirstate.write()
177 176 forget = [lfutil.standin(f) for f in forget]
178 177 remove = [lfutil.standin(f) for f in remove]
179 178 repo[None].forget(forget)
180 179 # If this is being called by addremove, let the original addremove
181 180 # function handle this.
182 181 if not getattr(repo, "_isaddremove", False):
183 182 for f in remove:
184 183 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
185 184 repo[None].forget(remove)
186 185 finally:
187 186 wlock.release()
188 187
189 188 return result
190 189
191 190 # For overriding mercurial.hgweb.webcommands so that largefiles will
192 191 # appear at their right place in the manifests.
193 192 def decodepath(orig, path):
194 193 return lfutil.splitstandin(path) or path
195 194
196 195 # -- Wrappers: modify existing commands --------------------------------
197 196
198 197 # Add works by going through the files that the user wanted to add and
199 198 # checking if they should be added as largefiles. Then it makes a new
200 199 # matcher which matches only the normal files and runs the original
201 200 # version of add.
202 201 def overrideadd(orig, ui, repo, *pats, **opts):
203 202 normal = opts.pop('normal')
204 203 if normal:
205 204 if opts.get('large'):
206 205 raise util.Abort(_('--normal cannot be used with --large'))
207 206 return orig(ui, repo, *pats, **opts)
208 207 bad = addlargefiles(ui, repo, *pats, **opts)
209 208 installnormalfilesmatchfn(repo[None].manifest())
210 209 result = orig(ui, repo, *pats, **opts)
211 210 restorematchfn()
212 211
213 212 return (result == 1 or bad) and 1 or 0
214 213
215 214 def overrideremove(orig, ui, repo, *pats, **opts):
216 215 installnormalfilesmatchfn(repo[None].manifest())
217 216 result = orig(ui, repo, *pats, **opts)
218 217 restorematchfn()
219 218 return removelargefiles(ui, repo, *pats, **opts) or result
220 219
221 220 def overridestatusfn(orig, repo, rev2, **opts):
222 221 try:
223 222 repo._repo.lfstatus = True
224 223 return orig(repo, rev2, **opts)
225 224 finally:
226 225 repo._repo.lfstatus = False
227 226
228 227 def overridestatus(orig, ui, repo, *pats, **opts):
229 228 try:
230 229 repo.lfstatus = True
231 230 return orig(ui, repo, *pats, **opts)
232 231 finally:
233 232 repo.lfstatus = False
234 233
235 234 def overridedirty(orig, repo, ignoreupdate=False):
236 235 try:
237 236 repo._repo.lfstatus = True
238 237 return orig(repo, ignoreupdate)
239 238 finally:
240 239 repo._repo.lfstatus = False
241 240
242 241 def overridelog(orig, ui, repo, *pats, **opts):
243 242 def overridematch(ctx, pats=[], opts={}, globbed=False,
244 243 default='relpath'):
245 244 """Matcher that merges root directory with .hglf, suitable for log.
246 245 It is still possible to match .hglf directly.
247 246 For any listed files run log on the standin too.
248 247 matchfn tries both the given filename and with .hglf stripped.
249 248 """
250 249 match = oldmatch(ctx, pats, opts, globbed, default)
251 250 m = copy.copy(match)
252 251 standins = [lfutil.standin(f) for f in m._files]
253 252 m._files.extend(standins)
254 253 m._fmap = set(m._files)
255 254 origmatchfn = m.matchfn
256 255 def lfmatchfn(f):
257 256 lf = lfutil.splitstandin(f)
258 257 if lf is not None and origmatchfn(lf):
259 258 return True
260 259 r = origmatchfn(f)
261 260 return r
262 261 m.matchfn = lfmatchfn
263 262 return m
264 263 oldmatch = installmatchfn(overridematch)
265 264 try:
266 265 repo.lfstatus = True
267 266 return orig(ui, repo, *pats, **opts)
268 267 finally:
269 268 repo.lfstatus = False
270 269 restorematchfn()
271 270
272 271 def overrideverify(orig, ui, repo, *pats, **opts):
273 272 large = opts.pop('large', False)
274 273 all = opts.pop('lfa', False)
275 274 contents = opts.pop('lfc', False)
276 275
277 276 result = orig(ui, repo, *pats, **opts)
278 277 if large:
279 278 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
280 279 return result
281 280
282 281 def overridedebugstate(orig, ui, repo, *pats, **opts):
283 282 large = opts.pop('large', False)
284 283 if large:
285 284 lfcommands.debugdirstate(ui, repo)
286 285 else:
287 286 orig(ui, repo, *pats, **opts)
288 287
289 288 # Override needs to refresh standins so that update's normal merge
290 289 # will go through properly. Then the other update hook (overriding repo.update)
291 290 # will get the new files. Filemerge is also overridden so that the merge
292 291 # will merge standins correctly.
293 292 def overrideupdate(orig, ui, repo, *pats, **opts):
294 293 lfdirstate = lfutil.openlfdirstate(ui, repo)
295 294 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
296 295 False, False)
297 296 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
298 297
299 298 # Need to lock between the standins getting updated and their
300 299 # largefiles getting updated
301 300 wlock = repo.wlock()
302 301 try:
303 302 if opts['check']:
304 303 mod = len(modified) > 0
305 304 for lfile in unsure:
306 305 standin = lfutil.standin(lfile)
307 306 if repo['.'][standin].data().strip() != \
308 307 lfutil.hashfile(repo.wjoin(lfile)):
309 308 mod = True
310 309 else:
311 310 lfdirstate.normal(lfile)
312 311 lfdirstate.write()
313 312 if mod:
314 313 raise util.Abort(_('uncommitted local changes'))
315 314 # XXX handle removed differently
316 315 if not opts['clean']:
317 316 for lfile in unsure + modified + added:
318 317 lfutil.updatestandin(repo, lfutil.standin(lfile))
319 318 finally:
320 319 wlock.release()
321 320 return orig(ui, repo, *pats, **opts)
322 321
323 322 # Before starting the manifest merge, merge.updates will call
324 323 # _checkunknown to check if there are any files in the merged-in
325 324 # changeset that collide with unknown files in the working copy.
326 325 #
327 326 # The largefiles are seen as unknown, so this prevents us from merging
328 327 # in a file 'foo' if we already have a largefile with the same name.
329 328 #
330 329 # The overridden function filters the unknown files by removing any
331 330 # largefiles. This makes the merge proceed and we can then handle this
332 331 # case further in the overridden manifestmerge function below.
333 332 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
334 333 if lfutil.standin(f) in wctx:
335 334 return False
336 335 return origfn(repo, wctx, mctx, f)
337 336
338 337 # The manifest merge handles conflicts on the manifest level. We want
339 338 # to handle changes in largefile-ness of files at this level too.
340 339 #
341 340 # The strategy is to run the original manifestmerge and then process
342 341 # the action list it outputs. There are two cases we need to deal with:
343 342 #
344 343 # 1. Normal file in p1, largefile in p2. Here the largefile is
345 344 # detected via its standin file, which will enter the working copy
346 345 # with a "get" action. It is not "merge" since the standin is all
347 346 # Mercurial is concerned with at this level -- the link to the
348 347 # existing normal file is not relevant here.
349 348 #
350 349 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
351 350 # since the largefile will be present in the working copy and
352 351 # different from the normal file in p2. Mercurial therefore
353 352 # triggers a merge action.
354 353 #
355 354 # In both cases, we prompt the user and emit new actions to either
356 355 # remove the standin (if the normal file was kept) or to remove the
357 356 # normal file and get the standin (if the largefile was kept). The
358 357 # default prompt answer is to use the largefile version since it was
359 358 # presumably changed on purpose.
360 359 #
361 360 # Finally, the merge.applyupdates function will then take care of
362 361 # writing the files into the working copy and lfcommands.updatelfiles
363 362 # will update the largefiles.
364 363 def overridemanifestmerge(origfn, repo, p1, p2, pa, overwrite, partial):
365 364 actions = origfn(repo, p1, p2, pa, overwrite, partial)
366 365 processed = []
367 366
368 367 for action in actions:
369 368 if overwrite:
370 369 processed.append(action)
371 370 continue
372 371 f, m = action[:2]
373 372
374 373 choices = (_('&Largefile'), _('&Normal file'))
375 374 if m == "g" and lfutil.splitstandin(f) in p1 and f in p2:
376 375 # Case 1: normal file in the working copy, largefile in
377 376 # the second parent
378 377 lfile = lfutil.splitstandin(f)
379 378 standin = f
380 379 msg = _('%s has been turned into a largefile\n'
381 380 'use (l)argefile or keep as (n)ormal file?') % lfile
382 381 if repo.ui.promptchoice(msg, choices, 0) == 0:
383 382 processed.append((lfile, "r"))
384 383 processed.append((standin, "g", p2.flags(standin)))
385 384 else:
386 385 processed.append((standin, "r"))
387 386 elif m == "g" and lfutil.standin(f) in p1 and f in p2:
388 387 # Case 2: largefile in the working copy, normal file in
389 388 # the second parent
390 389 standin = lfutil.standin(f)
391 390 lfile = f
392 391 msg = _('%s has been turned into a normal file\n'
393 392 'keep as (l)argefile or use (n)ormal file?') % lfile
394 393 if repo.ui.promptchoice(msg, choices, 0) == 0:
395 394 processed.append((lfile, "r"))
396 395 else:
397 396 processed.append((standin, "r"))
398 397 processed.append((lfile, "g", p2.flags(lfile)))
399 398 else:
400 399 processed.append(action)
401 400
402 401 return processed
403 402
404 403 # Override filemerge to prompt the user about how they wish to merge
405 404 # largefiles. This will handle identical edits, and copy/rename +
406 405 # edit without prompting the user.
407 406 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
408 407 # Use better variable names here. Because this is a wrapper we cannot
409 408 # change the variable names in the function declaration.
410 409 fcdest, fcother, fcancestor = fcd, fco, fca
411 410 if not lfutil.isstandin(orig):
412 411 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
413 412 else:
414 413 if not fcother.cmp(fcdest): # files identical?
415 414 return None
416 415
417 416 # backwards, use working dir parent as ancestor
418 417 if fcancestor == fcother:
419 418 fcancestor = fcdest.parents()[0]
420 419
421 420 if orig != fcother.path():
422 421 repo.ui.status(_('merging %s and %s to %s\n')
423 422 % (lfutil.splitstandin(orig),
424 423 lfutil.splitstandin(fcother.path()),
425 424 lfutil.splitstandin(fcdest.path())))
426 425 else:
427 426 repo.ui.status(_('merging %s\n')
428 427 % lfutil.splitstandin(fcdest.path()))
429 428
430 429 if fcancestor.path() != fcother.path() and fcother.data() == \
431 430 fcancestor.data():
432 431 return 0
433 432 if fcancestor.path() != fcdest.path() and fcdest.data() == \
434 433 fcancestor.data():
435 434 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
436 435 return 0
437 436
438 437 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
439 438 'keep (l)ocal or take (o)ther?') %
440 439 lfutil.splitstandin(orig),
441 440 (_('&Local'), _('&Other')), 0) == 0:
442 441 return 0
443 442 else:
444 443 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
445 444 return 0
446 445
447 446 # Copy first changes the matchers to match standins instead of
448 447 # largefiles. Then it overrides util.copyfile in that function it
449 448 # checks if the destination largefile already exists. It also keeps a
450 449 # list of copied files so that the largefiles can be copied and the
451 450 # dirstate updated.
452 451 def overridecopy(orig, ui, repo, pats, opts, rename=False):
453 452 # doesn't remove largefile on rename
454 453 if len(pats) < 2:
455 454 # this isn't legal, let the original function deal with it
456 455 return orig(ui, repo, pats, opts, rename)
457 456
458 457 def makestandin(relpath):
459 458 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
460 459 return os.path.join(repo.wjoin(lfutil.standin(path)))
461 460
462 461 fullpats = scmutil.expandpats(pats)
463 462 dest = fullpats[-1]
464 463
465 464 if os.path.isdir(dest):
466 465 if not os.path.isdir(makestandin(dest)):
467 466 os.makedirs(makestandin(dest))
468 467 # This could copy both lfiles and normal files in one command,
469 468 # but we don't want to do that. First replace their matcher to
470 469 # only match normal files and run it, then replace it to just
471 470 # match largefiles and run it again.
472 471 nonormalfiles = False
473 472 nolfiles = False
474 473 try:
475 474 try:
476 475 installnormalfilesmatchfn(repo[None].manifest())
477 476 result = orig(ui, repo, pats, opts, rename)
478 477 except util.Abort, e:
479 478 if str(e) != _('no files to copy'):
480 479 raise e
481 480 else:
482 481 nonormalfiles = True
483 482 result = 0
484 483 finally:
485 484 restorematchfn()
486 485
487 486 # The first rename can cause our current working directory to be removed.
488 487 # In that case there is nothing left to copy/rename so just quit.
489 488 try:
490 489 repo.getcwd()
491 490 except OSError:
492 491 return result
493 492
494 493 try:
495 494 try:
496 495 # When we call orig below it creates the standins but we don't add
497 496 # them to the dir state until later so lock during that time.
498 497 wlock = repo.wlock()
499 498
500 499 manifest = repo[None].manifest()
501 500 oldmatch = None # for the closure
502 501 def overridematch(ctx, pats=[], opts={}, globbed=False,
503 502 default='relpath'):
504 503 newpats = []
505 504 # The patterns were previously mangled to add the standin
506 505 # directory; we need to remove that now
507 506 for pat in pats:
508 507 if match_.patkind(pat) is None and lfutil.shortname in pat:
509 508 newpats.append(pat.replace(lfutil.shortname, ''))
510 509 else:
511 510 newpats.append(pat)
512 511 match = oldmatch(ctx, newpats, opts, globbed, default)
513 512 m = copy.copy(match)
514 513 lfile = lambda f: lfutil.standin(f) in manifest
515 514 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
516 515 m._fmap = set(m._files)
517 516 origmatchfn = m.matchfn
518 517 m.matchfn = lambda f: (lfutil.isstandin(f) and
519 518 (f in manifest) and
520 519 origmatchfn(lfutil.splitstandin(f)) or
521 520 None)
522 521 return m
523 522 oldmatch = installmatchfn(overridematch)
524 523 listpats = []
525 524 for pat in pats:
526 525 if match_.patkind(pat) is not None:
527 526 listpats.append(pat)
528 527 else:
529 528 listpats.append(makestandin(pat))
530 529
531 530 try:
532 531 origcopyfile = util.copyfile
533 532 copiedfiles = []
534 533 def overridecopyfile(src, dest):
535 534 if (lfutil.shortname in src and
536 535 dest.startswith(repo.wjoin(lfutil.shortname))):
537 536 destlfile = dest.replace(lfutil.shortname, '')
538 537 if not opts['force'] and os.path.exists(destlfile):
539 538 raise IOError('',
540 539 _('destination largefile already exists'))
541 540 copiedfiles.append((src, dest))
542 541 origcopyfile(src, dest)
543 542
544 543 util.copyfile = overridecopyfile
545 544 result += orig(ui, repo, listpats, opts, rename)
546 545 finally:
547 546 util.copyfile = origcopyfile
548 547
549 548 lfdirstate = lfutil.openlfdirstate(ui, repo)
550 549 for (src, dest) in copiedfiles:
551 550 if (lfutil.shortname in src and
552 551 dest.startswith(repo.wjoin(lfutil.shortname))):
553 552 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
554 553 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
555 554 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
556 555 if not os.path.isdir(destlfiledir):
557 556 os.makedirs(destlfiledir)
558 557 if rename:
559 558 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
560 559 lfdirstate.remove(srclfile)
561 560 else:
562 561 util.copyfile(repo.wjoin(srclfile),
563 562 repo.wjoin(destlfile))
564 563
565 564 lfdirstate.add(destlfile)
566 565 lfdirstate.write()
567 566 except util.Abort, e:
568 567 if str(e) != _('no files to copy'):
569 568 raise e
570 569 else:
571 570 nolfiles = True
572 571 finally:
573 572 restorematchfn()
574 573 wlock.release()
575 574
576 575 if nolfiles and nonormalfiles:
577 576 raise util.Abort(_('no files to copy'))
578 577
579 578 return result
580 579
581 580 # When the user calls revert, we have to be careful to not revert any
582 581 # changes to other largefiles accidentally. This means we have to keep
583 582 # track of the largefiles that are being reverted so we only pull down
584 583 # the necessary largefiles.
585 584 #
586 585 # Standins are only updated (to match the hash of largefiles) before
587 586 # commits. Update the standins then run the original revert, changing
588 587 # the matcher to hit standins instead of largefiles. Based on the
589 588 # resulting standins update the largefiles. Then return the standins
590 589 # to their proper state
591 590 def overriderevert(orig, ui, repo, *pats, **opts):
592 591 # Because we put the standins in a bad state (by updating them)
593 592 # and then return them to a correct state we need to lock to
594 593 # prevent others from changing them in their incorrect state.
595 594 wlock = repo.wlock()
596 595 try:
597 596 lfdirstate = lfutil.openlfdirstate(ui, repo)
598 597 (modified, added, removed, missing, unknown, ignored, clean) = \
599 598 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
600 599 lfdirstate.write()
601 600 for lfile in modified:
602 601 lfutil.updatestandin(repo, lfutil.standin(lfile))
603 602 for lfile in missing:
604 603 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
605 604 os.unlink(repo.wjoin(lfutil.standin(lfile)))
606 605
607 606 try:
608 607 ctx = scmutil.revsingle(repo, opts.get('rev'))
609 608 oldmatch = None # for the closure
610 609 def overridematch(ctx, pats=[], opts={}, globbed=False,
611 610 default='relpath'):
612 611 match = oldmatch(ctx, pats, opts, globbed, default)
613 612 m = copy.copy(match)
614 613 def tostandin(f):
615 614 if lfutil.standin(f) in ctx:
616 615 return lfutil.standin(f)
617 616 elif lfutil.standin(f) in repo[None]:
618 617 return None
619 618 return f
620 619 m._files = [tostandin(f) for f in m._files]
621 620 m._files = [f for f in m._files if f is not None]
622 621 m._fmap = set(m._files)
623 622 origmatchfn = m.matchfn
624 623 def matchfn(f):
625 624 if lfutil.isstandin(f):
626 625 # We need to keep track of what largefiles are being
627 626 # matched so we know which ones to update later --
628 627 # otherwise we accidentally revert changes to other
629 628 # largefiles. This is repo-specific, so duckpunch the
630 629 # repo object to keep the list of largefiles for us
631 630 # later.
632 631 if origmatchfn(lfutil.splitstandin(f)) and \
633 632 (f in repo[None] or f in ctx):
634 633 lfileslist = getattr(repo, '_lfilestoupdate', [])
635 634 lfileslist.append(lfutil.splitstandin(f))
636 635 repo._lfilestoupdate = lfileslist
637 636 return True
638 637 else:
639 638 return False
640 639 return origmatchfn(f)
641 640 m.matchfn = matchfn
642 641 return m
643 642 oldmatch = installmatchfn(overridematch)
644 643 scmutil.match
645 644 matches = overridematch(repo[None], pats, opts)
646 645 orig(ui, repo, *pats, **opts)
647 646 finally:
648 647 restorematchfn()
649 648 lfileslist = getattr(repo, '_lfilestoupdate', [])
650 649 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
651 650 printmessage=False)
652 651
653 652 # empty out the largefiles list so we start fresh next time
654 653 repo._lfilestoupdate = []
655 654 for lfile in modified:
656 655 if lfile in lfileslist:
657 656 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
658 657 in repo['.']:
659 658 lfutil.writestandin(repo, lfutil.standin(lfile),
660 659 repo['.'][lfile].data().strip(),
661 660 'x' in repo['.'][lfile].flags())
662 661 lfdirstate = lfutil.openlfdirstate(ui, repo)
663 662 for lfile in added:
664 663 standin = lfutil.standin(lfile)
665 664 if standin not in ctx and (standin in matches or opts.get('all')):
666 665 if lfile in lfdirstate:
667 666 lfdirstate.drop(lfile)
668 667 util.unlinkpath(repo.wjoin(standin))
669 668 lfdirstate.write()
670 669 finally:
671 670 wlock.release()
672 671
673 672 def hgupdate(orig, repo, node):
674 673 # Only call updatelfiles the standins that have changed to save time
675 674 oldstandins = lfutil.getstandinsstate(repo)
676 675 result = orig(repo, node)
677 676 newstandins = lfutil.getstandinsstate(repo)
678 677 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
679 678 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist, printmessage=True)
680 679 return result
681 680
682 681 def hgclean(orig, repo, node, show_stats=True):
683 682 result = orig(repo, node, show_stats)
684 683 lfcommands.updatelfiles(repo.ui, repo)
685 684 return result
686 685
687 686 def hgmerge(orig, repo, node, force=None, remind=True):
688 687 # Mark the repo as being in the middle of a merge, so that
689 688 # updatelfiles() will know that it needs to trust the standins in
690 689 # the working copy, not in the standins in the current node
691 690 repo._ismerging = True
692 691 try:
693 692 result = orig(repo, node, force, remind)
694 693 lfcommands.updatelfiles(repo.ui, repo)
695 694 finally:
696 695 repo._ismerging = False
697 696 return result
698 697
699 698 # When we rebase a repository with remotely changed largefiles, we need to
700 699 # take some extra care so that the largefiles are correctly updated in the
701 700 # working copy
702 701 def overridepull(orig, ui, repo, source=None, **opts):
703 702 revsprepull = len(repo)
704 703 if opts.get('rebase', False):
705 704 repo._isrebasing = True
706 705 try:
707 706 if opts.get('update'):
708 707 del opts['update']
709 708 ui.debug('--update and --rebase are not compatible, ignoring '
710 709 'the update flag\n')
711 710 del opts['rebase']
712 711 cmdutil.bailifchanged(repo)
713 712 origpostincoming = commands.postincoming
714 713 def _dummy(*args, **kwargs):
715 714 pass
716 715 commands.postincoming = _dummy
717 716 if not source:
718 717 source = 'default'
719 718 repo.lfpullsource = source
720 719 try:
721 720 result = commands.pull(ui, repo, source, **opts)
722 721 finally:
723 722 commands.postincoming = origpostincoming
724 723 revspostpull = len(repo)
725 724 if revspostpull > revsprepull:
726 725 result = result or rebase.rebase(ui, repo)
727 726 finally:
728 727 repo._isrebasing = False
729 728 else:
730 729 if not source:
731 730 source = 'default'
732 731 repo.lfpullsource = source
733 732 oldheads = lfutil.getcurrentheads(repo)
734 733 result = orig(ui, repo, source, **opts)
735 734 # If we do not have the new largefiles for any new heads we pulled, we
736 735 # will run into a problem later if we try to merge or rebase with one of
737 736 # these heads, so cache the largefiles now directly into the system
738 737 # cache.
739 738 ui.status(_("caching new largefiles\n"))
740 739 numcached = 0
741 740 heads = lfutil.getcurrentheads(repo)
742 741 newheads = set(heads).difference(set(oldheads))
743 742 for head in newheads:
744 743 (cached, missing) = lfcommands.cachelfiles(ui, repo, head)
745 744 numcached += len(cached)
746 745 ui.status(_("%d largefiles cached\n") % numcached)
747 746 if opts.get('all_largefiles'):
748 747 revspostpull = len(repo)
749 748 revs = []
750 749 for rev in xrange(revsprepull + 1, revspostpull):
751 750 revs.append(repo[rev].rev())
752 751 lfcommands.downloadlfiles(ui, repo, revs)
753 752 return result
754 753
755 754 def overrideclone(orig, ui, source, dest=None, **opts):
756 755 d = dest
757 756 if d is None:
758 757 d = hg.defaultdest(source)
759 758 if opts.get('all_largefiles') and not hg.islocal(d):
760 759 raise util.Abort(_(
761 760 '--all-largefiles is incompatible with non-local destination %s' %
762 761 d))
763 762
764 763 return orig(ui, source, dest, **opts)
765 764
766 765 def hgclone(orig, ui, opts, *args, **kwargs):
767 766 result = orig(ui, opts, *args, **kwargs)
768 767
769 768 if result is not None:
770 769 sourcerepo, destrepo = result
771 770 repo = destrepo.local()
772 771
773 772 # The .hglf directory must exist for the standin matcher to match
774 773 # anything (which listlfiles uses for each rev), and .hg/largefiles is
775 774 # assumed to exist by the code that caches the downloaded file. These
776 775 # directories exist if clone updated to any rev. (If the repo does not
777 776 # have largefiles, download never gets to the point of needing
778 777 # .hg/largefiles, and the standin matcher won't match anything anyway.)
779 778 if 'largefiles' in repo.requirements:
780 779 if opts.get('noupdate'):
781 780 util.makedirs(repo.wjoin(lfutil.shortname))
782 781 util.makedirs(repo.join(lfutil.longname))
783 782
784 783 # Caching is implicitly limited to 'rev' option, since the dest repo was
785 784 # truncated at that point. The user may expect a download count with
786 785 # this option, so attempt whether or not this is a largefile repo.
787 786 if opts.get('all_largefiles'):
788 787 success, missing = lfcommands.downloadlfiles(ui, repo, None)
789 788
790 789 if missing != 0:
791 790 return None
792 791
793 792 return result
794 793
795 794 def overriderebase(orig, ui, repo, **opts):
796 795 repo._isrebasing = True
797 796 try:
798 797 return orig(ui, repo, **opts)
799 798 finally:
800 799 repo._isrebasing = False
801 800
802 801 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
803 802 prefix=None, mtime=None, subrepos=None):
804 803 # No need to lock because we are only reading history and
805 804 # largefile caches, neither of which are modified.
806 805 lfcommands.cachelfiles(repo.ui, repo, node)
807 806
808 807 if kind not in archival.archivers:
809 808 raise util.Abort(_("unknown archive type '%s'") % kind)
810 809
811 810 ctx = repo[node]
812 811
813 812 if kind == 'files':
814 813 if prefix:
815 814 raise util.Abort(
816 815 _('cannot give prefix when archiving to files'))
817 816 else:
818 817 prefix = archival.tidyprefix(dest, kind, prefix)
819 818
820 819 def write(name, mode, islink, getdata):
821 820 if matchfn and not matchfn(name):
822 821 return
823 822 data = getdata()
824 823 if decode:
825 824 data = repo.wwritedata(name, data)
826 825 archiver.addfile(prefix + name, mode, islink, data)
827 826
828 827 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
829 828
830 829 if repo.ui.configbool("ui", "archivemeta", True):
831 830 def metadata():
832 831 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
833 832 hex(repo.changelog.node(0)), hex(node), ctx.branch())
834 833
835 834 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
836 835 if repo.tagtype(t) == 'global')
837 836 if not tags:
838 837 repo.ui.pushbuffer()
839 838 opts = {'template': '{latesttag}\n{latesttagdistance}',
840 839 'style': '', 'patch': None, 'git': None}
841 840 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
842 841 ltags, dist = repo.ui.popbuffer().split('\n')
843 842 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
844 843 tags += 'latesttagdistance: %s\n' % dist
845 844
846 845 return base + tags
847 846
848 847 write('.hg_archival.txt', 0644, False, metadata)
849 848
850 849 for f in ctx:
851 850 ff = ctx.flags(f)
852 851 getdata = ctx[f].data
853 852 if lfutil.isstandin(f):
854 853 path = lfutil.findfile(repo, getdata().strip())
855 854 if path is None:
856 855 raise util.Abort(
857 856 _('largefile %s not found in repo store or system cache')
858 857 % lfutil.splitstandin(f))
859 858 f = lfutil.splitstandin(f)
860 859
861 860 def getdatafn():
862 861 fd = None
863 862 try:
864 863 fd = open(path, 'rb')
865 864 return fd.read()
866 865 finally:
867 866 if fd:
868 867 fd.close()
869 868
870 869 getdata = getdatafn
871 870 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
872 871
873 872 if subrepos:
874 873 for subpath in sorted(ctx.substate):
875 874 sub = ctx.sub(subpath)
876 875 submatch = match_.narrowmatcher(subpath, matchfn)
877 876 sub.archive(repo.ui, archiver, prefix, submatch)
878 877
879 878 archiver.done()
880 879
881 880 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
882 881 repo._get(repo._state + ('hg',))
883 882 rev = repo._state[1]
884 883 ctx = repo._repo[rev]
885 884
886 885 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
887 886
888 887 def write(name, mode, islink, getdata):
889 888 # At this point, the standin has been replaced with the largefile name,
890 889 # so the normal matcher works here without the lfutil variants.
891 890 if match and not match(f):
892 891 return
893 892 data = getdata()
894 893
895 894 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
896 895
897 896 for f in ctx:
898 897 ff = ctx.flags(f)
899 898 getdata = ctx[f].data
900 899 if lfutil.isstandin(f):
901 900 path = lfutil.findfile(repo._repo, getdata().strip())
902 901 if path is None:
903 902 raise util.Abort(
904 903 _('largefile %s not found in repo store or system cache')
905 904 % lfutil.splitstandin(f))
906 905 f = lfutil.splitstandin(f)
907 906
908 907 def getdatafn():
909 908 fd = None
910 909 try:
911 910 fd = open(os.path.join(prefix, path), 'rb')
912 911 return fd.read()
913 912 finally:
914 913 if fd:
915 914 fd.close()
916 915
917 916 getdata = getdatafn
918 917
919 918 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
920 919
921 920 for subpath in sorted(ctx.substate):
922 921 sub = ctx.sub(subpath)
923 922 submatch = match_.narrowmatcher(subpath, match)
924 923 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
925 924 submatch)
926 925
927 926 # If a largefile is modified, the change is not reflected in its
928 927 # standin until a commit. cmdutil.bailifchanged() raises an exception
929 928 # if the repo has uncommitted changes. Wrap it to also check if
930 929 # largefiles were changed. This is used by bisect and backout.
931 930 def overridebailifchanged(orig, repo):
932 931 orig(repo)
933 932 repo.lfstatus = True
934 933 modified, added, removed, deleted = repo.status()[:4]
935 934 repo.lfstatus = False
936 935 if modified or added or removed or deleted:
937 936 raise util.Abort(_('outstanding uncommitted changes'))
938 937
939 938 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
940 939 def overridefetch(orig, ui, repo, *pats, **opts):
941 940 repo.lfstatus = True
942 941 modified, added, removed, deleted = repo.status()[:4]
943 942 repo.lfstatus = False
944 943 if modified or added or removed or deleted:
945 944 raise util.Abort(_('outstanding uncommitted changes'))
946 945 return orig(ui, repo, *pats, **opts)
947 946
948 947 def overrideforget(orig, ui, repo, *pats, **opts):
949 948 installnormalfilesmatchfn(repo[None].manifest())
950 949 result = orig(ui, repo, *pats, **opts)
951 950 restorematchfn()
952 951 m = scmutil.match(repo[None], pats, opts)
953 952
954 953 try:
955 954 repo.lfstatus = True
956 955 s = repo.status(match=m, clean=True)
957 956 finally:
958 957 repo.lfstatus = False
959 958 forget = sorted(s[0] + s[1] + s[3] + s[6])
960 959 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
961 960
962 961 for f in forget:
963 962 if lfutil.standin(f) not in repo.dirstate and not \
964 963 os.path.isdir(m.rel(lfutil.standin(f))):
965 964 ui.warn(_('not removing %s: file is already untracked\n')
966 965 % m.rel(f))
967 966 result = 1
968 967
969 968 for f in forget:
970 969 if ui.verbose or not m.exact(f):
971 970 ui.status(_('removing %s\n') % m.rel(f))
972 971
973 972 # Need to lock because standin files are deleted then removed from the
974 973 # repository and we could race in-between.
975 974 wlock = repo.wlock()
976 975 try:
977 976 lfdirstate = lfutil.openlfdirstate(ui, repo)
978 977 for f in forget:
979 978 if lfdirstate[f] == 'a':
980 979 lfdirstate.drop(f)
981 980 else:
982 981 lfdirstate.remove(f)
983 982 lfdirstate.write()
984 983 standins = [lfutil.standin(f) for f in forget]
985 984 for f in standins:
986 985 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
987 986 repo[None].forget(standins)
988 987 finally:
989 988 wlock.release()
990 989
991 990 return result
992 991
993 992 def getoutgoinglfiles(ui, repo, dest=None, **opts):
994 993 dest = ui.expandpath(dest or 'default-push', dest or 'default')
995 994 dest, branches = hg.parseurl(dest, opts.get('branch'))
996 995 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
997 996 if revs:
998 997 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
999 998
1000 999 try:
1001 1000 remote = hg.peer(repo, opts, dest)
1002 1001 except error.RepoError:
1003 1002 return None
1004 1003 outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
1005 1004 if not outgoing.missing:
1006 1005 return outgoing.missing
1007 1006 o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
1008 1007 if opts.get('newest_first'):
1009 1008 o.reverse()
1010 1009
1011 1010 toupload = set()
1012 1011 for n in o:
1013 1012 parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
1014 1013 ctx = repo[n]
1015 1014 files = set(ctx.files())
1016 1015 if len(parents) == 2:
1017 1016 mc = ctx.manifest()
1018 1017 mp1 = ctx.parents()[0].manifest()
1019 1018 mp2 = ctx.parents()[1].manifest()
1020 1019 for f in mp1:
1021 1020 if f not in mc:
1022 1021 files.add(f)
1023 1022 for f in mp2:
1024 1023 if f not in mc:
1025 1024 files.add(f)
1026 1025 for f in mc:
1027 1026 if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
1028 1027 files.add(f)
1029 1028 toupload = toupload.union(
1030 1029 set([f for f in files if lfutil.isstandin(f) and f in ctx]))
1031 1030 return sorted(toupload)
1032 1031
1033 1032 def overrideoutgoing(orig, ui, repo, dest=None, **opts):
1034 1033 result = orig(ui, repo, dest, **opts)
1035 1034
1036 1035 if opts.pop('large', None):
1037 1036 toupload = getoutgoinglfiles(ui, repo, dest, **opts)
1038 1037 if toupload is None:
1039 1038 ui.status(_('largefiles: No remote repo\n'))
1040 1039 elif not toupload:
1041 1040 ui.status(_('largefiles: no files to upload\n'))
1042 1041 else:
1043 1042 ui.status(_('largefiles to upload:\n'))
1044 1043 for file in toupload:
1045 1044 ui.status(lfutil.splitstandin(file) + '\n')
1046 1045 ui.status('\n')
1047 1046
1048 1047 return result
1049 1048
1050 1049 def overridesummary(orig, ui, repo, *pats, **opts):
1051 1050 try:
1052 1051 repo.lfstatus = True
1053 1052 orig(ui, repo, *pats, **opts)
1054 1053 finally:
1055 1054 repo.lfstatus = False
1056 1055
1057 1056 if opts.pop('large', None):
1058 1057 toupload = getoutgoinglfiles(ui, repo, None, **opts)
1059 1058 if toupload is None:
1060 1059 # i18n: column positioning for "hg summary"
1061 1060 ui.status(_('largefiles: (no remote repo)\n'))
1062 1061 elif not toupload:
1063 1062 # i18n: column positioning for "hg summary"
1064 1063 ui.status(_('largefiles: (no files to upload)\n'))
1065 1064 else:
1066 1065 # i18n: column positioning for "hg summary"
1067 1066 ui.status(_('largefiles: %d to upload\n') % len(toupload))
1068 1067
1069 1068 def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
1070 1069 similarity=None):
1071 1070 if not lfutil.islfilesrepo(repo):
1072 1071 return orig(repo, pats, opts, dry_run, similarity)
1073 1072 # Get the list of missing largefiles so we can remove them
1074 1073 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1075 1074 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
1076 1075 False, False)
1077 1076 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
1078 1077
1079 1078 # Call into the normal remove code, but the removing of the standin, we want
1080 1079 # to have handled by original addremove. Monkey patching here makes sure
1081 1080 # we don't remove the standin in the largefiles code, preventing a very
1082 1081 # confused state later.
1083 1082 if missing:
1084 1083 m = [repo.wjoin(f) for f in missing]
1085 1084 repo._isaddremove = True
1086 1085 removelargefiles(repo.ui, repo, *m, **opts)
1087 1086 repo._isaddremove = False
1088 1087 # Call into the normal add code, and any files that *should* be added as
1089 1088 # largefiles will be
1090 1089 addlargefiles(repo.ui, repo, *pats, **opts)
1091 1090 # Now that we've handled largefiles, hand off to the original addremove
1092 1091 # function to take care of the rest. Make sure it doesn't do anything with
1093 1092 # largefiles by installing a matcher that will ignore them.
1094 1093 installnormalfilesmatchfn(repo[None].manifest())
1095 1094 result = orig(repo, pats, opts, dry_run, similarity)
1096 1095 restorematchfn()
1097 1096 return result
1098 1097
1099 1098 # Calling purge with --all will cause the largefiles to be deleted.
1100 1099 # Override repo.status to prevent this from happening.
1101 1100 def overridepurge(orig, ui, repo, *dirs, **opts):
1102 1101 # XXX large file status is buggy when used on repo proxy.
1103 1102 # XXX this needs to be investigate.
1104 1103 repo = repo.unfiltered()
1105 1104 oldstatus = repo.status
1106 1105 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1107 1106 clean=False, unknown=False, listsubrepos=False):
1108 1107 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1109 1108 listsubrepos)
1110 1109 lfdirstate = lfutil.openlfdirstate(ui, repo)
1111 1110 modified, added, removed, deleted, unknown, ignored, clean = r
1112 1111 unknown = [f for f in unknown if lfdirstate[f] == '?']
1113 1112 ignored = [f for f in ignored if lfdirstate[f] == '?']
1114 1113 return modified, added, removed, deleted, unknown, ignored, clean
1115 1114 repo.status = overridestatus
1116 1115 orig(ui, repo, *dirs, **opts)
1117 1116 repo.status = oldstatus
1118 1117
1119 1118 def overriderollback(orig, ui, repo, **opts):
1120 1119 result = orig(ui, repo, **opts)
1121 1120 merge.update(repo, node=None, branchmerge=False, force=True,
1122 1121 partial=lfutil.isstandin)
1123 1122 wlock = repo.wlock()
1124 1123 try:
1125 1124 lfdirstate = lfutil.openlfdirstate(ui, repo)
1126 1125 lfiles = lfutil.listlfiles(repo)
1127 1126 oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
1128 1127 for file in lfiles:
1129 1128 if file in oldlfiles:
1130 1129 lfdirstate.normallookup(file)
1131 1130 else:
1132 1131 lfdirstate.add(file)
1133 1132 lfdirstate.write()
1134 1133 finally:
1135 1134 wlock.release()
1136 1135 return result
1137 1136
1138 1137 def overridetransplant(orig, ui, repo, *revs, **opts):
1139 1138 try:
1140 1139 oldstandins = lfutil.getstandinsstate(repo)
1141 1140 repo._istransplanting = True
1142 1141 result = orig(ui, repo, *revs, **opts)
1143 1142 newstandins = lfutil.getstandinsstate(repo)
1144 1143 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1145 1144 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1146 1145 printmessage=True)
1147 1146 finally:
1148 1147 repo._istransplanting = False
1149 1148 return result
1150 1149
1151 1150 def overridecat(orig, ui, repo, file1, *pats, **opts):
1152 1151 ctx = scmutil.revsingle(repo, opts.get('rev'))
1153 1152 if not lfutil.standin(file1) in ctx:
1154 1153 result = orig(ui, repo, file1, *pats, **opts)
1155 1154 return result
1156 1155 return lfcommands.catlfile(repo, file1, ctx.rev(), opts.get('output'))
1157 1156
1158 1157 def mercurialsinkbefore(orig, sink):
1159 1158 sink.repo._isconverting = True
1160 1159 orig(sink)
1161 1160
1162 1161 def mercurialsinkafter(orig, sink):
1163 1162 sink.repo._isconverting = False
1164 1163 orig(sink)
@@ -1,735 +1,733 b''
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 http://mercurial.selenic.com/wiki/RebaseExtension
15 15 '''
16 16
17 17 from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks
18 18 from mercurial import extensions, patch, scmutil, phases, obsolete
19 19 from mercurial.commands import templateopts
20 20 from mercurial.node import nullrev
21 21 from mercurial.lock import release
22 22 from mercurial.i18n import _
23 23 import os, errno
24 24
25 25 nullmerge = -2
26 26
27 27 cmdtable = {}
28 28 command = cmdutil.command(cmdtable)
29 29 testedwith = 'internal'
30 30
31 31 @command('rebase',
32 32 [('s', 'source', '',
33 33 _('rebase from the specified changeset'), _('REV')),
34 34 ('b', 'base', '',
35 35 _('rebase from the base of the specified changeset '
36 36 '(up to greatest common ancestor of base and dest)'),
37 37 _('REV')),
38 38 ('r', 'rev', [],
39 39 _('rebase these revisions'),
40 40 _('REV')),
41 41 ('d', 'dest', '',
42 42 _('rebase onto the specified changeset'), _('REV')),
43 43 ('', 'collapse', False, _('collapse the rebased changesets')),
44 44 ('m', 'message', '',
45 45 _('use text as collapse commit message'), _('TEXT')),
46 46 ('e', 'edit', False, _('invoke editor on commit messages')),
47 47 ('l', 'logfile', '',
48 48 _('read collapse commit message from file'), _('FILE')),
49 49 ('', 'keep', False, _('keep original changesets')),
50 50 ('', 'keepbranches', False, _('keep original branch names')),
51 51 ('D', 'detach', False, _('(DEPRECATED)')),
52 52 ('t', 'tool', '', _('specify merge tool')),
53 53 ('c', 'continue', False, _('continue an interrupted rebase')),
54 54 ('a', 'abort', False, _('abort an interrupted rebase'))] +
55 55 templateopts,
56 56 _('[-s REV | -b REV] [-d REV] [OPTION]'))
57 57 def rebase(ui, repo, **opts):
58 58 """move changeset (and descendants) to a different branch
59 59
60 60 Rebase uses repeated merging to graft changesets from one part of
61 61 history (the source) onto another (the destination). This can be
62 62 useful for linearizing *local* changes relative to a master
63 63 development tree.
64 64
65 65 You should not rebase changesets that have already been shared
66 66 with others. Doing so will force everybody else to perform the
67 67 same rebase or they will end up with duplicated changesets after
68 68 pulling in your rebased changesets.
69 69
70 70 If you don't specify a destination changeset (``-d/--dest``),
71 71 rebase uses the tipmost head of the current named branch as the
72 72 destination. (The destination changeset is not modified by
73 73 rebasing, but new changesets are added as its descendants.)
74 74
75 75 You can specify which changesets to rebase in two ways: as a
76 76 "source" changeset or as a "base" changeset. Both are shorthand
77 77 for a topologically related set of changesets (the "source
78 78 branch"). If you specify source (``-s/--source``), rebase will
79 79 rebase that changeset and all of its descendants onto dest. If you
80 80 specify base (``-b/--base``), rebase will select ancestors of base
81 81 back to but not including the common ancestor with dest. Thus,
82 82 ``-b`` is less precise but more convenient than ``-s``: you can
83 83 specify any changeset in the source branch, and rebase will select
84 84 the whole branch. If you specify neither ``-s`` nor ``-b``, rebase
85 85 uses the parent of the working directory as the base.
86 86
87 87 By default, rebase recreates the changesets in the source branch
88 88 as descendants of dest and then destroys the originals. Use
89 89 ``--keep`` to preserve the original source changesets. Some
90 90 changesets in the source branch (e.g. merges from the destination
91 91 branch) may be dropped if they no longer contribute any change.
92 92
93 93 One result of the rules for selecting the destination changeset
94 94 and source branch is that, unlike ``merge``, rebase will do
95 95 nothing if you are at the latest (tipmost) head of a named branch
96 96 with two heads. You need to explicitly specify source and/or
97 97 destination (or ``update`` to the other head, if it's the head of
98 98 the intended source branch).
99 99
100 100 If a rebase is interrupted to manually resolve a merge, it can be
101 101 continued with --continue/-c or aborted with --abort/-a.
102 102
103 103 Returns 0 on success, 1 if nothing to rebase.
104 104 """
105 105 originalwd = target = None
106 106 external = nullrev
107 107 state = {}
108 108 skipped = set()
109 109 targetancestors = set()
110 110
111 111 editor = None
112 112 if opts.get('edit'):
113 113 editor = cmdutil.commitforceeditor
114 114
115 115 lock = wlock = None
116 116 try:
117 117 wlock = repo.wlock()
118 118 lock = repo.lock()
119 119
120 120 # Validate input and define rebasing points
121 121 destf = opts.get('dest', None)
122 122 srcf = opts.get('source', None)
123 123 basef = opts.get('base', None)
124 124 revf = opts.get('rev', [])
125 125 contf = opts.get('continue')
126 126 abortf = opts.get('abort')
127 127 collapsef = opts.get('collapse', False)
128 128 collapsemsg = cmdutil.logmessage(ui, opts)
129 129 extrafn = opts.get('extrafn') # internal, used by e.g. hgsubversion
130 130 keepf = opts.get('keep', False)
131 131 keepbranchesf = opts.get('keepbranches', False)
132 132 # keepopen is not meant for use on the command line, but by
133 133 # other extensions
134 134 keepopen = opts.get('keepopen', False)
135 135
136 136 if collapsemsg and not collapsef:
137 137 raise util.Abort(
138 138 _('message can only be specified with collapse'))
139 139
140 140 if contf or abortf:
141 141 if contf and abortf:
142 142 raise util.Abort(_('cannot use both abort and continue'))
143 143 if collapsef:
144 144 raise util.Abort(
145 145 _('cannot use collapse with continue or abort'))
146 146 if srcf or basef or destf:
147 147 raise util.Abort(
148 148 _('abort and continue do not allow specifying revisions'))
149 149 if opts.get('tool', False):
150 150 ui.warn(_('tool option will be ignored\n'))
151 151
152 152 (originalwd, target, state, skipped, collapsef, keepf,
153 153 keepbranchesf, external) = restorestatus(repo)
154 154 if abortf:
155 155 return abort(repo, originalwd, target, state)
156 156 else:
157 157 if srcf and basef:
158 158 raise util.Abort(_('cannot specify both a '
159 159 'source and a base'))
160 160 if revf and basef:
161 161 raise util.Abort(_('cannot specify both a '
162 162 'revision and a base'))
163 163 if revf and srcf:
164 164 raise util.Abort(_('cannot specify both a '
165 165 'revision and a source'))
166 166
167 167 cmdutil.bailifchanged(repo)
168 168
169 169 if not destf:
170 170 # Destination defaults to the latest revision in the
171 171 # current branch
172 172 branch = repo[None].branch()
173 173 dest = repo[branch]
174 174 else:
175 175 dest = scmutil.revsingle(repo, destf)
176 176
177 177 if revf:
178 178 rebaseset = repo.revs('%lr', revf)
179 179 elif srcf:
180 180 src = scmutil.revrange(repo, [srcf])
181 181 rebaseset = repo.revs('(%ld)::', src)
182 182 else:
183 183 base = scmutil.revrange(repo, [basef or '.'])
184 184 rebaseset = repo.revs(
185 185 '(children(ancestor(%ld, %d)) and ::(%ld))::',
186 186 base, dest, base)
187 187 if rebaseset:
188 188 root = min(rebaseset)
189 189 else:
190 190 root = None
191 191
192 192 if not rebaseset:
193 193 repo.ui.debug('base is ancestor of destination\n')
194 194 result = None
195 195 elif (not (keepf or obsolete._enabled)
196 196 and repo.revs('first(children(%ld) - %ld)',
197 197 rebaseset, rebaseset)):
198 198 raise util.Abort(
199 199 _("can't remove original changesets with"
200 200 " unrebased descendants"),
201 201 hint=_('use --keep to keep original changesets'))
202 202 elif not keepf and not repo[root].mutable():
203 203 raise util.Abort(_("can't rebase immutable changeset %s")
204 204 % repo[root],
205 205 hint=_('see hg help phases for details'))
206 206 else:
207 207 result = buildstate(repo, dest, rebaseset, collapsef)
208 208
209 209 if not result:
210 210 # Empty state built, nothing to rebase
211 211 ui.status(_('nothing to rebase\n'))
212 212 return 1
213 213 else:
214 214 originalwd, target, state = result
215 215 if collapsef:
216 216 targetancestors = repo.changelog.ancestors([target],
217 217 inclusive=True)
218 218 external = checkexternal(repo, state, targetancestors)
219 219
220 220 if keepbranchesf:
221 221 assert not extrafn, 'cannot use both keepbranches and extrafn'
222 222 def extrafn(ctx, extra):
223 223 extra['branch'] = ctx.branch()
224 224 if collapsef:
225 225 branches = set()
226 226 for rev in state:
227 227 branches.add(repo[rev].branch())
228 228 if len(branches) > 1:
229 229 raise util.Abort(_('cannot collapse multiple named '
230 230 'branches'))
231 231
232 232
233 233 # Rebase
234 234 if not targetancestors:
235 235 targetancestors = repo.changelog.ancestors([target], inclusive=True)
236 236
237 237 # Keep track of the current bookmarks in order to reset them later
238 238 currentbookmarks = repo._bookmarks.copy()
239 239 activebookmark = repo._bookmarkcurrent
240 240 if activebookmark:
241 241 bookmarks.unsetcurrent(repo)
242 242
243 243 sortedstate = sorted(state)
244 244 total = len(sortedstate)
245 245 pos = 0
246 246 for rev in sortedstate:
247 247 pos += 1
248 248 if state[rev] == -1:
249 249 ui.progress(_("rebasing"), pos, ("%d:%s" % (rev, repo[rev])),
250 250 _('changesets'), total)
251 251 storestatus(repo, originalwd, target, state, collapsef, keepf,
252 252 keepbranchesf, external)
253 253 p1, p2 = defineparents(repo, rev, target, state,
254 254 targetancestors)
255 255 if len(repo.parents()) == 2:
256 256 repo.ui.debug('resuming interrupted rebase\n')
257 257 else:
258 258 try:
259 259 ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
260 260 stats = rebasenode(repo, rev, p1, state, collapsef)
261 261 if stats and stats[3] > 0:
262 262 raise util.Abort(_('unresolved conflicts (see hg '
263 263 'resolve, then hg rebase --continue)'))
264 264 finally:
265 265 ui.setconfig('ui', 'forcemerge', '')
266 266 cmdutil.duplicatecopies(repo, rev, target)
267 267 if not collapsef:
268 268 newrev = concludenode(repo, rev, p1, p2, extrafn=extrafn,
269 269 editor=editor)
270 270 else:
271 271 # Skip commit if we are collapsing
272 272 repo.setparents(repo[p1].node())
273 273 newrev = None
274 274 # Update the state
275 275 if newrev is not None:
276 276 state[rev] = repo[newrev].rev()
277 277 else:
278 278 if not collapsef:
279 279 ui.note(_('no changes, revision %d skipped\n') % rev)
280 280 ui.debug('next revision set to %s\n' % p1)
281 281 skipped.add(rev)
282 282 state[rev] = p1
283 283
284 284 ui.progress(_('rebasing'), None)
285 285 ui.note(_('rebase merging completed\n'))
286 286
287 287 if collapsef and not keepopen:
288 288 p1, p2 = defineparents(repo, min(state), target,
289 289 state, targetancestors)
290 290 if collapsemsg:
291 291 commitmsg = collapsemsg
292 292 else:
293 293 commitmsg = 'Collapsed revision'
294 294 for rebased in state:
295 295 if rebased not in skipped and state[rebased] != nullmerge:
296 296 commitmsg += '\n* %s' % repo[rebased].description()
297 297 commitmsg = ui.edit(commitmsg, repo.ui.username())
298 298 newrev = concludenode(repo, rev, p1, external, commitmsg=commitmsg,
299 299 extrafn=extrafn, editor=editor)
300 300
301 301 if 'qtip' in repo.tags():
302 302 updatemq(repo, state, skipped, **opts)
303 303
304 304 if currentbookmarks:
305 305 # Nodeids are needed to reset bookmarks
306 306 nstate = {}
307 307 for k, v in state.iteritems():
308 308 if v != nullmerge:
309 309 nstate[repo[k].node()] = repo[v].node()
310 310
311 311 if not keepf:
312 312 collapsedas = None
313 313 if collapsef:
314 314 collapsedas = newrev
315 315 clearrebased(ui, repo, state, collapsedas)
316 316
317 317 if currentbookmarks:
318 318 updatebookmarks(repo, nstate, currentbookmarks, **opts)
319 319
320 320 clearstatus(repo)
321 321 ui.note(_("rebase completed\n"))
322 if os.path.exists(repo.sjoin('undo')):
323 util.unlinkpath(repo.sjoin('undo'))
322 util.unlinkpath(repo.sjoin('undo'), ignoremissing=True)
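# ignoremissing=True lets unlinkpath tolerate an already-missing undo
# file, replacing the explicit os.path.exists() check used previously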
324 323 if skipped:
325 324 ui.note(_("%d revisions have been skipped\n") % len(skipped))
326 325
327 326 if (activebookmark and
328 327 repo['tip'].node() == repo._bookmarks[activebookmark]):
329 328 bookmarks.setcurrent(repo, activebookmark)
330 329
331 330 finally:
332 331 release(lock, wlock)
333 332
334 333 def checkexternal(repo, state, targetancestors):
335 334 """Check whether one or more external revisions need to be taken in
336 335 consideration. In the latter case, abort.
337 336 """
338 337 external = nullrev
339 338 source = min(state)
340 339 for rev in state:
341 340 if rev == source:
342 341 continue
343 342 # Check externals and fail if there are more than one
344 343 for p in repo[rev].parents():
345 344 if (p.rev() not in state
346 345 and p.rev() not in targetancestors):
347 346 if external != nullrev:
348 347 raise util.Abort(_('unable to collapse, there is more '
349 348 'than one external parent'))
350 349 external = p.rev()
351 350 return external
352 351
353 352 def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None):
354 353 'Commit the changes and store useful information in extra'
355 354 try:
356 355 repo.setparents(repo[p1].node(), repo[p2].node())
357 356 ctx = repo[rev]
358 357 if commitmsg is None:
359 358 commitmsg = ctx.description()
360 359 extra = {'rebase_source': ctx.hex()}
361 360 if extrafn:
362 361 extrafn(ctx, extra)
363 362 # Commit might fail if unresolved files exist
364 363 newrev = repo.commit(text=commitmsg, user=ctx.user(),
365 364 date=ctx.date(), extra=extra, editor=editor)
366 365 repo.dirstate.setbranch(repo[newrev].branch())
367 366 targetphase = max(ctx.phase(), phases.draft)
368 367 # retractboundary doesn't overwrite upper phase inherited from parent
369 368 newnode = repo[newrev].node()
370 369 if newnode:
371 370 phases.retractboundary(repo, targetphase, [newnode])
372 371 return newrev
373 372 except util.Abort:
374 373 # Invalidate the previous setparents
375 374 repo.dirstate.invalidate()
376 375 raise
377 376
378 377 def rebasenode(repo, rev, p1, state, collapse):
379 378 'Rebase a single revision'
380 379 # Merge phase
381 380 # Update to target and merge it with local
382 381 if repo['.'].rev() != repo[p1].rev():
383 382 repo.ui.debug(" update to %d:%s\n" % (repo[p1].rev(), repo[p1]))
384 383 merge.update(repo, p1, False, True, False)
385 384 else:
386 385 repo.ui.debug(" already in target\n")
387 386 repo.dirstate.write()
388 387 repo.ui.debug(" merge against %d:%s\n" % (repo[rev].rev(), repo[rev]))
389 388 base = None
390 389 if repo[rev].rev() != repo[min(state)].rev():
391 390 base = repo[rev].p1().node()
392 391 # When collapsing in-place, the parent is the common ancestor, so we
393 392 # have to allow merging with it.
394 393 return merge.update(repo, rev, True, True, False, base, collapse)
395 394
396 395 def defineparents(repo, rev, target, state, targetancestors):
397 396 'Return the new parent relationship of the revision that will be rebased'
398 397 parents = repo[rev].parents()
399 398 p1 = p2 = nullrev
400 399
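# map each original parent either onto the rebase target, onto its
# already-rebased counterpart recorded in state, or keep it as an
# external parent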
401 400 P1n = parents[0].rev()
402 401 if P1n in targetancestors:
403 402 p1 = target
404 403 elif P1n in state:
405 404 if state[P1n] == nullmerge:
406 405 p1 = target
407 406 else:
408 407 p1 = state[P1n]
409 408 else: # P1n external
410 409 p1 = target
411 410 p2 = P1n
412 411
413 412 if len(parents) == 2 and parents[1].rev() not in targetancestors:
414 413 P2n = parents[1].rev()
415 414 # interesting second parent
416 415 if P2n in state:
417 416 if p1 == target: # P1n in targetancestors or external
418 417 p1 = state[P2n]
419 418 else:
420 419 p2 = state[P2n]
421 420 else: # P2n external
422 421 if p2 != nullrev: # P1n external too => rev is a merged revision
423 422 raise util.Abort(_('cannot use revision %d as base, result '
424 423 'would have 3 parents') % rev)
425 424 p2 = P2n
426 425 repo.ui.debug(" future parents are %d and %d\n" %
427 426 (repo[p1].rev(), repo[p2].rev()))
428 427 return p1, p2
429 428
430 429 def isagitpatch(repo, patchname):
431 430 'Return true if the given patch is in git format'
432 431 mqpatch = os.path.join(repo.mq.path, patchname)
433 432 for line in patch.linereader(file(mqpatch, 'rb')):
434 433 if line.startswith('diff --git'):
435 434 return True
436 435 return False
437 436
438 437 def updatemq(repo, state, skipped, **opts):
439 438 'Update rebased mq patches - finalize and then import them'
440 439 mqrebase = {}
441 440 mq = repo.mq
442 441 original_series = mq.fullseries[:]
443 442 skippedpatches = set()
444 443
445 444 for p in mq.applied:
446 445 rev = repo[p.node].rev()
447 446 if rev in state:
448 447 repo.ui.debug('revision %d is an mq patch (%s), finalize it.\n' %
449 448 (rev, p.name))
450 449 mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
451 450 else:
452 451 # Applied but not rebased, not sure this should happen
453 452 skippedpatches.add(p.name)
454 453
455 454 if mqrebase:
456 455 mq.finish(repo, mqrebase.keys())
457 456
458 457 # We must start the import from the newest revision
459 458 for rev in sorted(mqrebase, reverse=True):
460 459 if rev not in skipped:
461 460 name, isgit = mqrebase[rev]
462 461 repo.ui.debug('import mq patch %d (%s)\n' % (state[rev], name))
463 462 mq.qimport(repo, (), patchname=name, git=isgit,
464 463 rev=[str(state[rev])])
465 464 else:
466 465 # Rebased and skipped
467 466 skippedpatches.add(mqrebase[rev][0])
468 467
469 468 # Patches were either applied and rebased and imported in
470 469 # order, applied and removed or unapplied. Discard the removed
471 470 # ones while preserving the original series order and guards.
472 471 newseries = [s for s in original_series
473 472 if mq.guard_re.split(s, 1)[0] not in skippedpatches]
474 473 mq.fullseries[:] = newseries
475 474 mq.seriesdirty = True
476 475 mq.savedirty()
477 476
478 477 def updatebookmarks(repo, nstate, originalbookmarks, **opts):
479 478 'Move bookmarks to their correct changesets'
480 479 marks = repo._bookmarks
481 480 for k, v in originalbookmarks.iteritems():
482 481 if v in nstate:
483 482 if nstate[v] != nullmerge:
484 483 # update the bookmarks for revs that have moved
485 484 marks[k] = nstate[v]
486 485
487 486 marks.write()
488 487
489 488 def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
490 489 external):
491 490 'Store the current status to allow recovery'
492 491 f = repo.opener("rebasestate", "w")
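# one field per line: originalwd, target and external as hex nodes,
# the collapse/keep/keepbranches flags as integers, then one
# oldrev:newrev pair for every revision in state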
493 492 f.write(repo[originalwd].hex() + '\n')
494 493 f.write(repo[target].hex() + '\n')
495 494 f.write(repo[external].hex() + '\n')
496 495 f.write('%d\n' % int(collapse))
497 496 f.write('%d\n' % int(keep))
498 497 f.write('%d\n' % int(keepbranches))
499 498 for d, v in state.iteritems():
500 499 oldrev = repo[d].hex()
501 500 if v != nullmerge:
502 501 newrev = repo[v].hex()
503 502 else:
504 503 newrev = v
505 504 f.write("%s:%s\n" % (oldrev, newrev))
506 505 f.close()
507 506 repo.ui.debug('rebase status stored\n')
508 507
509 508 def clearstatus(repo):
510 509 'Remove the status files'
511 if os.path.exists(repo.join("rebasestate")):
512 util.unlinkpath(repo.join("rebasestate"))
510 util.unlinkpath(repo.join("rebasestate"), ignoremissing=True)
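# with ignoremissing=True a missing rebasestate file simply means there
# is no status left to clear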
513 511
514 512 def restorestatus(repo):
515 513 'Restore a previously stored status'
516 514 try:
517 515 target = None
518 516 collapse = False
519 517 external = nullrev
520 518 state = {}
521 519 f = repo.opener("rebasestate")
522 520 for i, l in enumerate(f.read().splitlines()):
523 521 if i == 0:
524 522 originalwd = repo[l].rev()
525 523 elif i == 1:
526 524 target = repo[l].rev()
527 525 elif i == 2:
528 526 external = repo[l].rev()
529 527 elif i == 3:
530 528 collapse = bool(int(l))
531 529 elif i == 4:
532 530 keep = bool(int(l))
533 531 elif i == 5:
534 532 keepbranches = bool(int(l))
535 533 else:
536 534 oldrev, newrev = l.split(':')
537 535 if newrev != str(nullmerge):
538 536 state[repo[oldrev].rev()] = repo[newrev].rev()
539 537 else:
540 538 state[repo[oldrev].rev()] = int(newrev)
541 539 skipped = set()
542 540 # recompute the set of skipped revs
543 541 if not collapse:
544 542 seen = set([target])
545 543 for old, new in sorted(state.items()):
546 544 if new != nullrev and new in seen:
547 545 skipped.add(old)
548 546 seen.add(new)
549 547 repo.ui.debug('computed skipped revs: %s\n' % skipped)
550 548 repo.ui.debug('rebase status resumed\n')
551 549 return (originalwd, target, state, skipped,
552 550 collapse, keep, keepbranches, external)
553 551 except IOError, err:
554 552 if err.errno != errno.ENOENT:
555 553 raise
556 554 raise util.Abort(_('no rebase in progress'))
557 555
558 556 def abort(repo, originalwd, target, state):
559 557 'Restore the repository to its original state'
560 558 dstates = [s for s in state.values() if s != nullrev]
561 559 immutable = [d for d in dstates if not repo[d].mutable()]
562 560 if immutable:
563 561 raise util.Abort(_("can't abort rebase due to immutable changesets %s")
564 562 % ', '.join(str(repo[r]) for r in immutable),
565 563 hint=_('see hg help phases for details'))
566 564
567 565 descendants = set()
568 566 if dstates:
569 567 descendants = set(repo.changelog.descendants(dstates))
570 568 if descendants - set(dstates):
571 569 repo.ui.warn(_("warning: new changesets detected on target branch, "
572 570 "can't abort\n"))
573 571 return -1
574 572 else:
575 573 # Strip from the first rebased revision
576 574 merge.update(repo, repo[originalwd].rev(), False, True, False)
577 575 rebased = filter(lambda x: x > -1 and x != target, state.values())
578 576 if rebased:
579 577 strippoint = min(rebased)
580 578 # no backup of rebased cset versions needed
581 579 repair.strip(repo.ui, repo, repo[strippoint].node())
582 580 clearstatus(repo)
583 581 repo.ui.warn(_('rebase aborted\n'))
584 582 return 0
585 583
586 584 def buildstate(repo, dest, rebaseset, collapse):
587 585 '''Define which revisions are going to be rebased and where
588 586
589 587 repo: repo
590 588 dest: context
591 589 rebaseset: set of rev
592 590 '''
593 591
594 592 # This check isn't strictly necessary, since mq detects commits over an
595 593 # applied patch. But it prevents messing up the working directory when
596 594 # a partially completed rebase is blocked by mq.
597 595 if 'qtip' in repo.tags() and (dest.node() in
598 596 [s.node for s in repo.mq.applied]):
599 597 raise util.Abort(_('cannot rebase onto an applied mq patch'))
600 598
601 599 roots = list(repo.set('roots(%ld)', rebaseset))
602 600 if not roots:
603 601 raise util.Abort(_('no matching revisions'))
604 602 if len(roots) > 1:
605 603 raise util.Abort(_("can't rebase multiple roots"))
606 604 root = roots[0]
607 605
608 606 commonbase = root.ancestor(dest)
609 607 if commonbase == root:
610 608 raise util.Abort(_('source is ancestor of destination'))
611 609 if commonbase == dest:
612 610 samebranch = root.branch() == dest.branch()
613 611 if not collapse and samebranch and root in dest.children():
614 612 repo.ui.debug('source is a child of destination\n')
615 613 return None
616 614
617 615 repo.ui.debug('rebase onto %d starting from %d\n' % (dest, root))
618 616 state = dict.fromkeys(rebaseset, nullrev)
619 617 # Rebase tries to turn <dest> into a parent of <root> while
620 618 # preserving the number of parents of rebased changesets:
621 619 #
622 620 # - A changeset with a single parent will always be rebased as a
623 621 # changeset with a single parent.
624 622 #
625 623 # - A merge will be rebased as merge unless its parents are both
626 624 # ancestors of <dest> or are themselves in the rebased set and
627 625 # pruned while rebased.
628 626 #
629 627 # If one parent of <root> is an ancestor of <dest>, the rebased
630 628 # version of this parent will be <dest>. This is always true with
631 629 # --base option.
632 630 #
633 631 # Otherwise, we need to *replace* the original parents with
634 632 # <dest>. This "detaches" the rebased set from its former location
635 633 # and rebases it onto <dest>. Changes introduced by ancestors of
636 634 # <root> not common with <dest> (the detachset, marked as
637 635 # nullmerge) are "removed" from the rebased changesets.
638 636 #
639 637 # - If <root> has a single parent, set it to <dest>.
640 638 #
641 639 # - If <root> is a merge, we cannot decide which parent to
642 640 # replace, the rebase operation is not clearly defined.
643 641 #
644 642 # The table below sums up this behavior:
645 643 #
646 644 # +--------------------+----------------------+-------------------------+
647 645 # | | one parent | merge |
648 646 # +--------------------+----------------------+-------------------------+
649 647 # | parent in ::<dest> | new parent is <dest> | parents in ::<dest> are |
650 648 # | | | remapped to <dest> |
651 649 # +--------------------+----------------------+-------------------------+
652 650 # | unrelated source | new parent is <dest> | ambiguous, abort |
653 651 # +--------------------+----------------------+-------------------------+
654 652 #
655 653 # The actual abort is handled by `defineparents`
656 654 if len(root.parents()) <= 1:
657 655 # ancestors of <root> not ancestors of <dest>
658 656 detachset = repo.changelog.findmissingrevs([commonbase.rev()],
659 657 [root.rev()])
660 658 state.update(dict.fromkeys(detachset, nullmerge))
661 659 # detachset can have root, and we definitely want to rebase that
662 660 state[root.rev()] = nullrev
663 661 return repo['.'].rev(), dest.rev(), state
664 662
665 663 def clearrebased(ui, repo, state, collapsedas=None):
666 664 """dispose of rebased revision at the end of the rebase
667 665
668 666 If `collapsedas` is not None, the rebase was a collapse whose result if the
669 667 `collapsedas` node."""
670 668 if obsolete._enabled:
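# with obsolescence markers enabled, record old->new relationships
# instead of stripping the rebased changesets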
671 669 markers = []
672 670 for rev, newrev in sorted(state.items()):
673 671 if newrev >= 0:
674 672 if collapsedas is not None:
675 673 newrev = collapsedas
676 674 markers.append((repo[rev], (repo[newrev],)))
677 675 if markers:
678 676 obsolete.createmarkers(repo, markers)
679 677 else:
680 678 rebased = [rev for rev in state if state[rev] != nullmerge]
681 679 if rebased:
682 680 if set(repo.changelog.descendants([min(rebased)])) - set(state):
683 681 ui.warn(_("warning: new changesets detected "
684 682 "on source branch, not stripping\n"))
685 683 else:
686 684 # backup the old csets by default
687 685 repair.strip(ui, repo, repo[min(rebased)].node(), "all")
688 686
689 687
690 688 def pullrebase(orig, ui, repo, *args, **opts):
691 689 'Call rebase after pull if the latter has been invoked with --rebase'
692 690 if opts.get('rebase'):
693 691 if opts.get('update'):
694 692 del opts['update']
695 693 ui.debug('--update and --rebase are not compatible, ignoring '
696 694 'the update flag\n')
697 695
698 696 movemarkfrom = repo['.'].node()
699 697 cmdutil.bailifchanged(repo)
700 698 revsprepull = len(repo)
701 699 origpostincoming = commands.postincoming
702 700 def _dummy(*args, **kwargs):
703 701 pass
704 702 commands.postincoming = _dummy
705 703 try:
706 704 orig(ui, repo, *args, **opts)
707 705 finally:
708 706 commands.postincoming = origpostincoming
709 707 revspostpull = len(repo)
710 708 if revspostpull > revsprepull:
711 709 # the --rev option from pull conflicts with rebase's own --rev option,
712 710 # so drop it
713 711 if 'rev' in opts:
714 712 del opts['rev']
715 713 rebase(ui, repo, **opts)
716 714 branch = repo[None].branch()
717 715 dest = repo[branch].rev()
718 716 if dest != repo['.'].rev():
719 717 # there was nothing to rebase, so force an update
720 718 hg.update(repo, dest)
721 719 if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
722 720 ui.status(_("updating bookmark %s\n")
723 721 % repo._bookmarkcurrent)
724 722 else:
725 723 if opts.get('tool'):
726 724 raise util.Abort(_('--tool can only be used with --rebase'))
727 725 orig(ui, repo, *args, **opts)
728 726
729 727 def uisetup(ui):
730 728 'Replace pull with a decorator to provide --rebase option'
731 729 entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
732 730 entry[1].append(('', 'rebase', None,
733 731 _("rebase working directory to branch head")))
734 732 entry[1].append(('t', 'tool', '',
735 733 _("specify merge tool for rebase")))