##// END OF EJS Templates
status: update various other methods to return new class
Martin von Zweigbergk -
r22914:c95db320 default
parent child Browse files
Show More
@@ -1,1329 +1,1330 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10
11 11 import os
12 12 import copy
13 13
14 14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 15 archival, pathutil, revset
16 16 from mercurial.i18n import _
17 17 from mercurial.node import hex
18 18 from hgext import rebase
19 19
20 20 import lfutil
21 21 import lfcommands
22 22 import basestore
23 23
24 24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25 25
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        match = oldmatch(ctx, pats, opts, globbed, default)
        m = copy.copy(match)
        # A file is "not a largefile" when it is neither a standin nor a
        # file whose standin is tracked in the given manifest.
        notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                manifest)
        # Patch the matcher's internals in place: drop largefiles from
        # its explicit file list and force full matchfn evaluation.
        m._files = filter(notlfile, m._files)
        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        # Match only non-largefiles accepted by the original matchfn.
        m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
        return m
    oldmatch = installmatchfn(overridematch)
41 41
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    # Remember the currently installed function on f itself so that
    # restorematchfn() can pop this installation later.
    previous = scmutil.match
    f.oldmatch = previous
    scmutil.match = f
    return previous
49 49
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore matchfn to reverse'''
    # Supply the current function as the getattr default so that calling
    # this without a prior installmatchfn() really is a no-op (as the
    # docstring promises) instead of raising AttributeError.  This
    # mirrors restorematchandpatsfn below.
    scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
57 57
def installmatchandpatsfn(f):
    # Swap scmutil.matchandpats for f, stashing the previous function on
    # f itself so restorematchandpatsfn() can undo the installation.
    previous = scmutil.matchandpats
    f.oldmatchandpats = previous
    scmutil.matchandpats = f
    return previous
63 63
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installnormalfilesmatchandpatsfn was called. no-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installnormalfilesmatchandpatsfn will require n calls
    to restore matchfn to reverse'''
    # The default makes this a genuine no-op when nothing was installed.
    current = scmutil.matchandpats
    scmutil.matchandpats = getattr(current, 'oldmatchandpats', current)
73 73
def addlargefiles(ui, repo, *pats, **opts):
    '''Add matching files as largefiles when --large, the minimum-size
    threshold, or the configured largefiles patterns say so; write and
    add their standins. Returns the list of explicitly named files whose
    standins could not be added.'''
    large = opts.pop('large', None)
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = scmutil.match(repo[None], pats, opts)
    # Silence the matcher's bad-file callback; the normal add run warns.
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            wfile = repo.wjoin(f)

            # In case the file was removed previously, but not committed
            # (issue3507)
            if not os.path.exists(wfile):
                continue

            abovemin = (lfsize and
                        os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                # Write a placeholder standin with an empty hash.
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                # Re-adding a previously removed largefile keeps history.
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            # Only explicitly named files count as "bad" for the caller.
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]
    finally:
        wlock.release()
    return bad
142 142
def removelargefiles(ui, repo, *pats, **opts):
    '''Remove largefiles matching pats (or, with --after, record
    already-deleted ones). Returns 1 if anything could not be removed,
    otherwise 0.'''
    after = opts.get('after')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # Keep only real largefiles (those whose standin is tracked).
    # Status tuple: s[0]=modified, s[1]=added, s[3]=deleted, s[6]=clean.
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in [s[0], s[1], s[3], s[6]]]

    def warn(files, msg):
        # Emit msg per file; return 1 if anything was warned about.
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    for f in sorted(remove):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                # If this is being called by addremove, notify the user that we
                # are removing the file.
                if getattr(repo, "_isaddremove", False):
                    ui.status(_('removing %s\n') % f)
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            lfdirstate.remove(f)
        lfdirstate.write()
        # Switch from largefile names to their standins for the repo.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not getattr(repo, "_isaddremove", False):
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(remove)
    finally:
        wlock.release()

    return result
205 205
206 206 # For overriding mercurial.hgweb.webcommands so that largefiles will
207 207 # appear at their right place in the manifests.
def decodepath(orig, path):
    # Map a standin path back to the largefile name it represents;
    # anything that is not a standin passes through unchanged.
    lfile = lfutil.splitstandin(path)
    if lfile:
        return lfile
    return path
210 210
211 211 # -- Wrappers: modify existing commands --------------------------------
212 212
213 213 # Add works by going through the files that the user wanted to add and
214 214 # checking if they should be added as largefiles. Then it makes a new
215 215 # matcher which matches only the normal files and runs the original
216 216 # version of add.
def overrideadd(orig, ui, repo, *pats, **opts):
    # --normal forces plain adds and is incompatible with --large.
    addasnormal = opts.pop('normal')
    if addasnormal:
        if opts.get('large'):
            raise util.Abort(_('--normal cannot be used with --large'))
        return orig(ui, repo, *pats, **opts)

    # First add whatever qualifies as a largefile, then run the original
    # add restricted to normal files only.
    bad = addlargefiles(ui, repo, *pats, **opts)
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()

    # Fail (1) if either phase failed.
    return 1 if (result == 1 or bad) else 0
229 229
def overrideremove(orig, ui, repo, *pats, **opts):
    # Run the original remove over normal files only, then take care of
    # the largefiles ourselves; either failure makes the command fail.
    installnormalfilesmatchfn(repo[None].manifest())
    normalresult = orig(ui, repo, *pats, **opts)
    restorematchfn()
    lfresult = removelargefiles(ui, repo, *pats, **opts)
    return lfresult or normalresult
235 235
def overridestatusfn(orig, repo, rev2, **opts):
    # Run the subrepo status with largefile-aware reporting enabled on
    # the underlying repository; always restore the flag.
    repo._repo.lfstatus = True
    try:
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
242 242
def overridestatus(orig, ui, repo, *pats, **opts):
    # Enable largefile-aware status for this one invocation.
    repo.lfstatus = True
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
249 249
def overridedirty(orig, repo, ignoreupdate=False):
    # Compute subrepo dirtiness with largefile-aware status enabled so
    # modified largefiles count as dirty.
    repo._repo.lfstatus = True
    try:
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
256 256
def overridelog(orig, ui, repo, *pats, **opts):
    '''Run log so that largefiles are reported under their real names by
    temporarily overriding scmutil.matchandpats (and the no-follow file
    matcher used for --patch).'''
    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)
        # TODO: handling of patterns in both cases below
        if m._cwd:
            if os.path.isabs(m._cwd):
                # TODO: handle largefile magic when invoked from other cwd
                return matchandpats
            # Prefix enough '../' to climb from the cwd back to the repo
            # root before descending into the standin directory.
            back = (m._cwd.count('/') + 1) * '../'
            pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
        else:
            pats.update(lfutil.standin(f) for f in p)

        for i in range(0, len(m._files)):
            standin = lfutil.standin(m._files[i])
            # Replace a file by its standin if only the standin exists in
            # the context; otherwise also try the standin alongside it.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()]:
                m._files.append(standin)
                pats.add(standin)

        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # Accept f when either its stripped largefile name or f
            # itself matches the original matcher.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts):
        pctx = repo[None]
        match, pats = oldmatchandpats(pctx, pats, opts)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        # Undo both monkey-patches even if log raised.
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
324 324
def overrideverify(orig, ui, repo, *pats, **opts):
    # Pop the largefiles-specific flags so the original verify never
    # sees them. (Renamed locals to avoid shadowing the builtin 'all'.)
    large = opts.pop('large', False)
    verifyall = opts.pop('lfa', False)
    verifycontents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or verifyall or verifycontents:
        result = result or lfcommands.verifylfiles(ui, repo, verifyall,
                                                   verifycontents)
    return result
334 334
def overridedebugstate(orig, ui, repo, *pats, **opts):
    # Without --large this is a plain pass-through.
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return
    # With --large, hand orig a stand-in "repo" whose dirstate is the
    # largefiles dirstate so it gets dumped instead.
    class fakerepo(object):
        dirstate = lfutil.openlfdirstate(ui, repo)
    orig(ui, fakerepo, *pats, **opts)
343 343
344 344 # Override needs to refresh standins so that update's normal merge
345 345 # will go through properly. Then the other update hook (overriding repo.update)
346 346 # will get the new files. Filemerge is also overridden so that the merge
347 347 # will merge standins correctly.
def overrideupdate(orig, ui, repo, *pats, **opts):
    '''Run update with the largefiles dirstate locked; with -c/--check,
    abort if any largefile has uncommitted changes.'''
    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
            [], False, False, False)
        modified = s[0]

        if opts['check']:
            mod = len(modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                # Settle "unsure" files by comparing the hash recorded in
                # the standin against the working-copy largefile's hash.
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    # Confirmed clean: record it so we don't re-hash.
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted changes'))
        return orig(ui, repo, *pats, **opts)
    finally:
        wlock.release()
373 373
374 374 # Before starting the manifest merge, merge.updates will call
375 375 # _checkunknown to check if there are any files in the merged-in
376 376 # changeset that collide with unknown files in the working copy.
377 377 #
378 378 # The largefiles are seen as unknown, so this prevents us from merging
379 379 # in a file 'foo' if we already have a largefile with the same name.
380 380 #
381 381 # The overridden function filters the unknown files by removing any
382 382 # largefiles. This makes the merge proceed and we can then handle this
383 383 # case further in the overridden manifestmerge function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
    # An "unknown" file that is really one of our own largefiles must
    # not be reported as a conflict; defer everything else to origfn.
    normalized = repo.dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        return False
    return origfn(repo, wctx, mctx, f)
388 388
389 389 # The manifest merge handles conflicts on the manifest level. We want
390 390 # to handle changes in largefile-ness of files at this level too.
391 391 #
392 392 # The strategy is to run the original manifestmerge and then process
393 393 # the action list it outputs. There are two cases we need to deal with:
394 394 #
395 395 # 1. Normal file in p1, largefile in p2. Here the largefile is
396 396 # detected via its standin file, which will enter the working copy
397 397 # with a "get" action. It is not "merge" since the standin is all
398 398 # Mercurial is concerned with at this level -- the link to the
399 399 # existing normal file is not relevant here.
400 400 #
401 401 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
402 402 # since the largefile will be present in the working copy and
403 403 # different from the normal file in p2. Mercurial therefore
404 404 # triggers a merge action.
405 405 #
406 406 # In both cases, we prompt the user and emit new actions to either
407 407 # remove the standin (if the normal file was kept) or to remove the
408 408 # normal file and get the standin (if the largefile was kept). The
409 409 # default prompt answer is to use the largefile version since it was
410 410 # presumably changed on purpose.
411 411 #
412 412 # Finally, the merge.applyupdates function will then take care of
413 413 # writing the files into the working copy and lfcommands.updatelfiles
414 414 # will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
        partial, acceptremote, followcopies):
    '''Post-process the action dict from the original calculateupdates to
    resolve files whose largefile-ness differs between the parents (see
    the explanation in the comment block above).'''
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pas, branchmerge, force, partial,
        acceptremote, followcopies)

    if overwrite:
        # Plain overwrite: no conflicts to reconcile.
        return actions

    removes = set(a[0] for a in actions['r'])

    newglist = []
    lfmr = [] # LargeFiles: Mark as Removed
    for action in actions['g']:
        f, args, msg = action
        splitstandin = f and lfutil.splitstandin(f)
        if (splitstandin is not None and
            splitstandin in p1 and splitstandin not in removes):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('remote turned local normal file %s into a largefile\n'
                    'use (l)argefile or keep (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                # Keep the largefile: drop the normal file, get standin.
                actions['r'].append((lfile, None, msg))
                newglist.append((standin, (p2.flags(standin),), msg))
            else:
                # Keep the normal file: drop the standin.
                actions['r'].append((standin, None, msg))
        elif lfutil.standin(f) in p1 and lfutil.standin(f) not in removes:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('remote turned local largefile %s into a normal file\n'
                    'keep (l)argefile or use (n)ormal file?'
                    '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(msg, 0) == 0:
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions['r'].append((lfile, None, msg))
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    lfmr.append((lfile, None, msg))

                    # linear-merge should treat this largefile as 're-added'
                    actions['a'].append((standin, None, msg))
            else:
                # Keep the normal file: drop the standin, get the file.
                actions['r'].append((standin, None, msg))
                newglist.append((lfile, (p2.flags(lfile),), msg))
        else:
            newglist.append(action)

    newglist.sort()
    actions['g'] = newglist
    if lfmr:
        lfmr.sort()
        actions['lfmr'] = lfmr

    return actions
477 477
def mergerecordupdates(orig, repo, actions, branchmerge):
    # 'lfmr' actions mark largefiles as removed in the dirstate without
    # deleting the files themselves; record them before anything else.
    for lfile, args, msg in actions.get('lfmr', []):
        repo.dirstate.remove(lfile)

    return orig(repo, actions, branchmerge)
486 486
487 487
488 488 # Override filemerge to prompt the user about how they wish to merge
489 489 # largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
    '''Merge standins by comparing the hashes they contain; identical
    edits are resolved without prompting. Non-standins go to origfn.'''
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)

    # a = ancestor, d = local (destination), o = other.
    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    # Take the other version iff it differs from both the ancestor and
    # the local version, and either the local side is unchanged from the
    # ancestor or the user picks "other" at the prompt.
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
               (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0
508 508
509 509 # Copy first changes the matchers to match standins instead of
510 510 # largefiles. Then it overrides util.copyfile in that function it
511 511 # checks if the destination largefile already exists. It also keeps a
512 512 # list of copied files so that the largefiles can be copied and the
513 513 # dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    '''Copy/rename normal files with the original command, then repeat
    the operation over standins and mirror it onto the largefiles.'''
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        # Absolute working-directory path of the standin for relpath.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        try:
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # "no files to copy" here only means no *normal* files;
            # remember that and still try the largefiles pass below.
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                # Restrict the matcher to tracked largefiles, expressed
                # as their standins.
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                # Intercept util.copyfile to record which standins get
                # copied and to guard against clobbering largefiles.
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    # Mirror each copied standin by copying/renaming the
                    # corresponding largefile in the working copy.
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                        # The file is gone, but this deletes any empty parent
                        # directories as a side-effect.
                        util.unlinkpath(repo.wjoin(srclfile), True)
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))

                    lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        # Neither pass found anything: surface the original error.
        raise util.Abort(_('no files to copy'))

    return result
646 646
647 647 # When the user calls revert, we have to be careful to not revert any
648 648 # changes to other largefiles accidentally. This means we have to keep
649 649 # track of the largefiles that are being reverted so we only pull down
650 650 # the necessary largefiles.
651 651 #
652 652 # Standins are only updated (to match the hash of largefiles) before
653 653 # commits. Update the standins then run the original revert, changing
654 654 # the matcher to hit standins instead of largefiles. Based on the
655 655 # resulting standins update the largefiles.
def overriderevert(orig, ui, repo, *pats, **opts):
    '''Revert largefiles by refreshing their standins, reverting the
    standins with the original command, and then updating the largefiles
    from the resulting standins.'''
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
        lfdirstate.write()
        # Refresh standins of modified largefiles so revert compares
        # against current content, and drop standins of missing ones.
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(ctx, pats=[], opts={}, globbed=False,
                default='relpath'):
            match = oldmatch(ctx, pats, opts, globbed, default)
            m = copy.copy(match)
            def tostandin(f):
                # Revert via the standin when it exists in the target
                # context; drop files that only have a working-copy
                # standin; pass normal files through unchanged.
                if lfutil.standin(f) in ctx:
                    return lfutil.standin(f)
                elif lfutil.standin(f) in repo[None]:
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fmap = set(m._files)
            m._always = False
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in repo[None] or f in ctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()
714 714
715 715 # When we rebase a repository with remotely changed largefiles, we need to
716 716 # take some extra care so that the largefiles are correctly updated in the
717 717 # working copy
def overridepull(orig, ui, repo, source=None, **opts):
    '''Pull, optionally rebasing, then cache largefiles for the pulled
    revisions requested via --lfrev / --all-largefiles.'''
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    if opts.get('rebase', False):
        # pull --rebase: run the pull ourselves with the normal post-pull
        # update suppressed, then rebase; _isrebasing lets other
        # largefiles hooks tell this apart from a regular update.
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        # Eagerly download largefiles for the selected new revisions.
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
762 762
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    try:
        # repo.firstpulled is only set by overridepull while caching
        # largefiles; anywhere else this symbol is an error.
        firstpulled = repo.firstpulled
    except AttributeError:
        raise util.Abort(_("pulled() only available in --lfrev"))
    # Everything at or after the pre-pull tip length was just pulled.
    return revset.baseset([r for r in subset if r >= firstpulled])
788 788
def overrideclone(orig, ui, source, dest=None, **opts):
    # --all-largefiles needs a local destination store, so reject
    # non-local destinations up front.
    destpath = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            destpath)

    return orig(ui, source, dest, **opts)
799 799
def hgclone(orig, ui, opts, *args, **kwargs):
    cloneresult = orig(ui, opts, *args, **kwargs)

    if cloneresult is None:
        return None

    sourcerepo, destrepo = cloneresult
    repo = destrepo.local()

    # Caching is implicitly limited to 'rev' option, since the dest repo
    # was truncated at that point. The user may expect a download count
    # with this option, so attempt whether or not this is a largefile
    # repo.
    if opts.get('all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo, None)
        if missing != 0:
            return None

    return cloneresult
817 817
def overriderebase(orig, ui, repo, **opts):
    # Flag the repo as rebasing so other largefiles hooks can tell this
    # apart from a regular update; always clear the flag afterwards.
    repo._isrebasing = True
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False
824 824
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    '''Create an archive of revision 'node', replacing largefile standins
    with the actual largefile contents.

    Largefiles are fetched into the local cache first; the archive fails
    if one cannot be found in the repo store or system cache.
    '''
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # honor the caller's matcher and optional keyword-decoding before
        # handing the file to the archiver
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            # standard .hg_archival.txt header: repo id, node, branch
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                # no global tag on this revision: record the latest tag
                # and its distance instead, via the template engine
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # resolve the standin to the cached largefile on disk and
            # archive it under the largefile's (split) name
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            # closes over 'path'; safe because write() is called before
            # the loop rebinds 'path' on the next iteration
            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(repo.ui, archiver, prefix, submatch)

    archiver.done()
903 903
904 904 def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
905 905 repo._get(repo._state + ('hg',))
906 906 rev = repo._state[1]
907 907 ctx = repo._repo[rev]
908 908
909 909 lfcommands.cachelfiles(ui, repo._repo, ctx.node())
910 910
911 911 def write(name, mode, islink, getdata):
912 912 # At this point, the standin has been replaced with the largefile name,
913 913 # so the normal matcher works here without the lfutil variants.
914 914 if match and not match(f):
915 915 return
916 916 data = getdata()
917 917
918 918 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
919 919
920 920 for f in ctx:
921 921 ff = ctx.flags(f)
922 922 getdata = ctx[f].data
923 923 if lfutil.isstandin(f):
924 924 path = lfutil.findfile(repo._repo, getdata().strip())
925 925 if path is None:
926 926 raise util.Abort(
927 927 _('largefile %s not found in repo store or system cache')
928 928 % lfutil.splitstandin(f))
929 929 f = lfutil.splitstandin(f)
930 930
931 931 def getdatafn():
932 932 fd = None
933 933 try:
934 934 fd = open(os.path.join(prefix, path), 'rb')
935 935 return fd.read()
936 936 finally:
937 937 if fd:
938 938 fd.close()
939 939
940 940 getdata = getdatafn
941 941
942 942 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
943 943
944 944 for subpath in sorted(ctx.substate):
945 945 sub = ctx.sub(subpath)
946 946 submatch = match_.narrowmatcher(subpath, match)
947 947 sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
948 948 submatch)
949 949
950 950 # If a largefile is modified, the change is not reflected in its
951 951 # standin until a commit. cmdutil.bailifchanged() raises an exception
952 952 # if the repo has uncommitted changes. Wrap it to also check if
953 953 # largefiles were changed. This is used by bisect and backout.
def overridebailifchanged(orig, repo):
    '''Abort if there are uncommitted changes, including to largefiles.'''
    orig(repo)
    # Re-run status with largefile awareness switched on, so dirty
    # largefiles (whose standins are unchanged) are detected too.
    repo.lfstatus = True
    dirty = repo.status()[:4]
    repo.lfstatus = False
    if any(dirty):
        raise util.Abort(_('uncommitted changes'))
961 961
962 962 # Fetch doesn't use cmdutil.bailifchanged so override it to add the check
def overridefetch(orig, ui, repo, *pats, **opts):
    '''Refuse to fetch when the working copy (largefiles included) is
    dirty, mirroring the check in overridebailifchanged.'''
    repo.lfstatus = True
    dirty = repo.status()[:4]
    repo.lfstatus = False
    if any(dirty):
        raise util.Abort(_('uncommitted changes'))
    return orig(ui, repo, *pats, **opts)
970 970
def overrideforget(orig, ui, repo, *pats, **opts):
    '''Implement "hg forget" for largefiles.

    Normal files are forgotten by the wrapped command (with largefiles
    masked out of its matcher); matching largefiles are then dropped
    from the largefiles dirstate and their standins removed.
    Returns the command result (1 on any warning).
    '''
    # run the original forget on normal files only
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    # modified + added + deleted + clean, restricted to tracked largefiles
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            result = 1

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                # never committed: just drop the pending add
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(standins)
    finally:
        wlock.release()

    return result
1015 1015
def _getoutgoings(repo, other, missing, addfunc):
    """Report (filename, largefile hash) pairs that are outgoing in the
    revisions listed in 'missing'.

    Pairs whose largefile already exists on the 'other' repository are
    skipped.  'addfunc' is invoked once for every remaining unique pair.
    """
    seen = set()
    hashes = set()
    def collect(fn, lfhash):
        # record each (filename, hash) pair only once
        pair = (fn, lfhash)
        if pair in seen:
            return
        seen.add(pair)
        hashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, collect)
    if not hashes:
        return
    # one batched existence query against the remote store
    lfexists = basestore._openstore(repo, other).exists(hashes)
    for fn, lfhash in seen:
        if not lfexists[lfhash]: # lfhash doesn't exist on "other"
            addfunc(fn, lfhash)
1038 1038
def outgoinghook(ui, repo, other, opts, missing):
    '''Hook for "hg outgoing --large": list the largefiles that would be
    uploaded to 'other' for the outgoing revisions in 'missing'.

    With --debug, each largefile hash is printed under its filename;
    otherwise only the filenames are listed.
    '''
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # map filename -> list of hashes so each hash can be shown
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            # only filenames are needed in the non-debug case
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1070 1070
def summaryremotehook(ui, repo, opts, changes):
    '''Hook for "hg summary --remote": report outgoing largefiles.

    When 'changes' is None, only announce which remote checks this hook
    needs (incoming, outgoing); otherwise print the largefiles line.
    '''
    wantslarge = opts.get('large', False)
    if changes is None:
        # (needs incoming check, needs outgoing check)
        return (False, True) if wantslarge else (False, False)
    if not wantslarge:
        return
    url, branch, peer, outgoing = changes[1]
    if peer is None:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no remote repo)\n'))
        return

    files = set()
    hashes = set()
    def accumulate(fn, lfhash):
        files.add(fn)
        hashes.add(lfhash)
    _getoutgoings(repo, peer, outgoing.missing, accumulate)

    if not files:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no files to upload)\n'))
    else:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: %d entities for %d files to upload\n')
                  % (len(hashes), len(files)))
1099 1099
def overridesummary(orig, ui, repo, *pats, **opts):
    '''Run "hg summary" with largefile-aware status reporting.'''
    repo.lfstatus = True
    try:
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1106 1106
def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
                     similarity=None):
    '''Wrap scmutil.addremove so missing largefiles are removed and new
    largefiles are added through the largefiles machinery, while normal
    files go through the original addremove.'''
    if not lfutil.islfilesrepo(repo):
        return orig(repo, pats, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)
    missing = s[3]

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove. Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if missing:
        m = [repo.wjoin(f) for f in missing]
        repo._isaddremove = True
        removelargefiles(repo.ui, repo, *m, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    addlargefiles(repo.ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(repo, pats, opts, dry_run, similarity)
    restorematchfn()
    return result
1136 1136
1137 1137 # Calling purge with --all will cause the largefiles to be deleted.
1138 1138 # Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    '''Run purge with a status method that hides tracked largefiles.

    Without this, purge --all would see largefiles as unknown files and
    delete them.
    '''
    # XXX large file status is buggy when used on repo proxy.
    # XXX this needs to be investigated.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        # anything tracked by the largefiles dirstate is not genuinely
        # unknown/ignored, so filter those entries out
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return scmutil.status(modified, added, removed, deleted,
                              unknown, ignored, clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
1156 1157
def overriderollback(orig, ui, repo, **opts):
    '''Rollback, then restore working-directory standins to match the
    rolled-back dirstate parents and resync the largefiles dirstate.'''
    wlock = repo.wlock()
    try:
        before = repo.dirstate.parents()
        # standins tracked before the rollback; any that remain in this
        # set afterwards were introduced by the undone transaction and
        # must be deleted from the working directory
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore the standin content from the new parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # resync the largefiles dirstate with the rolled-back state and
        # drop entries whose largefile is no longer tracked
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
1196 1197
def overridetransplant(orig, ui, repo, *revs, **opts):
    '''Run transplant, then refresh the largefiles whose standins were
    changed by the transplanted revisions.'''
    try:
        before = lfutil.getstandinsstate(repo)
        # flag automated committing so other overrides skip standin updates
        repo._istransplanting = True
        result = orig(ui, repo, *revs, **opts)
        after = lfutil.getstandinsstate(repo)
        changed = lfutil.getlfilestoupdate(before, after)
        lfcommands.updatelfiles(repo.ui, repo, filelist=changed,
                                printmessage=True)
    finally:
        repo._istransplanting = False
    return result
1209 1210
def overridecat(orig, ui, repo, file1, *pats, **opts):
    '''Implement "hg cat" for largefiles: names matched via their
    standins are resolved to the largefile contents, fetched from a
    store into the user cache if necessary.  Returns 0 if anything was
    written, 1 otherwise.'''
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        # also match a standin whose largefile name matches the patterns
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # suppress "no such file" warnings for names matched via standins
        if not f in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            # fetch the largefile into the user cache if it is not there
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            # NOTE(review): fp/fpin are not closed if an exception is
            # raised mid-copy — a try/finally would be safer; confirm
            # before changing.
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
1257 1258
def mercurialsinkbefore(orig, sink):
    '''convert hook: flag the destination repo as converting before the
    sink starts.'''
    setattr(sink.repo, '_isconverting', True)
    orig(sink)
1261 1262
def mercurialsinkafter(orig, sink):
    '''convert hook: clear the converting flag once the sink is done.'''
    setattr(sink.repo, '_isconverting', False)
    orig(sink)
1265 1266
def mergeupdate(orig, repo, node, branchmerge, force, partial,
                *args, **kwargs):
    '''Wrap merge.update to keep largefiles in sync with their standins
    across updates and merges.'''
    wlock = repo.wlock()
    try:
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        linearmerge = not branchmerge and not force and not partial

        if linearmerge or (branchmerge and force and not partial):
            # update standins for linear-merge or force-branch-merge,
            # because largefiles in the working directory may be modified
            lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
            unsure, s = lfdirstate.status(match_.always(repo.root,
                                                        repo.getcwd()),
                                          [], False, False, False)
            modified, added = s[:2]
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))

        if linearmerge:
            # Only call updatelfiles on the standins that have changed
            # to save time
            oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)

        filelist = None
        if linearmerge:
            newstandins = lfutil.getstandinsstate(repo)
            filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # suppress status message while automated committing
        printmessage = not (getattr(repo, "_isrebasing", False) or
                            getattr(repo, "_istransplanting", False))
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=printmessage,
                                normallookup=partial)

        return result
    finally:
        wlock.release()
1320 1321
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    '''After marking files as touched, quietly refresh any largefiles
    whose standins are among them.'''
    result = orig(repo, files, *args, **kwargs)

    touchedlfiles = []
    for f in files:
        if lfutil.isstandin(f):
            touchedlfiles.append(lfutil.splitstandin(f))
    if touchedlfiles:
        lfcommands.updatelfiles(repo.ui, repo, filelist=touchedlfiles,
                                printmessage=False, normallookup=True)

    return result
@@ -1,484 +1,484 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''setup for largefiles repositories: reposetup'''
10 10 import copy
11 11 import os
12 12
13 13 from mercurial import error, manifest, match as match_, util
14 14 from mercurial.i18n import _
15 from mercurial import localrepo
15 from mercurial import localrepo, scmutil
16 16
17 17 import lfcommands
18 18 import lfutil
19 19
20 20 def reposetup(ui, repo):
21 21 # wire repositories should be given new wireproto functions
22 22 # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
23 23 if not repo.local():
24 24 return
25 25
26 26 class lfilesrepo(repo.__class__):
27 27 lfstatus = False
28 28 def status_nolfiles(self, *args, **kwargs):
29 29 return super(lfilesrepo, self).status(*args, **kwargs)
30 30
31 31 # When lfstatus is set, return a context that gives the names
32 32 # of largefiles instead of their corresponding standins and
33 33 # identifies the largefiles as always binary, regardless of
34 34 # their actual contents.
35 35 def __getitem__(self, changeid):
36 36 ctx = super(lfilesrepo, self).__getitem__(changeid)
37 37 if self.lfstatus:
38 38 class lfilesmanifestdict(manifest.manifestdict):
39 39 def __contains__(self, filename):
40 40 orig = super(lfilesmanifestdict, self).__contains__
41 41 return orig(filename) or orig(lfutil.standin(filename))
42 42 class lfilesctx(ctx.__class__):
43 43 def files(self):
44 44 filenames = super(lfilesctx, self).files()
45 45 return [lfutil.splitstandin(f) or f for f in filenames]
46 46 def manifest(self):
47 47 man1 = super(lfilesctx, self).manifest()
48 48 man1.__class__ = lfilesmanifestdict
49 49 return man1
50 50 def filectx(self, path, fileid=None, filelog=None):
51 51 orig = super(lfilesctx, self).filectx
52 52 try:
53 53 if filelog is not None:
54 54 result = orig(path, fileid, filelog)
55 55 else:
56 56 result = orig(path, fileid)
57 57 except error.LookupError:
58 58 # Adding a null character will cause Mercurial to
59 59 # identify this as a binary file.
60 60 if filelog is not None:
61 61 result = orig(lfutil.standin(path), fileid,
62 62 filelog)
63 63 else:
64 64 result = orig(lfutil.standin(path), fileid)
65 65 olddata = result.data
66 66 result.data = lambda: olddata() + '\0'
67 67 return result
68 68 ctx.__class__ = lfilesctx
69 69 return ctx
70 70
71 71 # Figure out the status of big files and insert them into the
72 72 # appropriate list in the result. Also removes standin files
73 73 # from the listing. Revert to the original status if
74 74 # self.lfstatus is False.
75 75 # XXX large file status is buggy when used on repo proxy.
76 76 # XXX this needs to be investigated.
77 77 @localrepo.unfilteredmethod
78 78 def status(self, node1='.', node2=None, match=None, ignored=False,
79 79 clean=False, unknown=False, listsubrepos=False):
80 80 listignored, listclean, listunknown = ignored, clean, unknown
81 81 orig = super(lfilesrepo, self).status
82 82 if not self.lfstatus:
83 83 return orig(node1, node2, match, listignored, listclean,
84 84 listunknown, listsubrepos)
85 85
86 86 # some calls in this function rely on the old version of status
87 87 self.lfstatus = False
88 88 ctx1 = self[node1]
89 89 ctx2 = self[node2]
90 90 working = ctx2.rev() is None
91 91 parentworking = working and ctx1 == self['.']
92 92
93 93 def inctx(file, ctx):
94 94 try:
95 95 if ctx.rev() is None:
96 96 return file in ctx.manifest()
97 97 ctx[file]
98 98 return True
99 99 except KeyError:
100 100 return False
101 101
102 102 if match is None:
103 103 match = match_.always(self.root, self.getcwd())
104 104
105 105 wlock = None
106 106 try:
107 107 try:
108 108 # updating the dirstate is optional
109 109 # so we don't wait on the lock
110 110 wlock = self.wlock(False)
111 111 except error.LockError:
112 112 pass
113 113
114 114 # First check if there were files specified on the
115 115 # command line. If there were, and none of them were
116 116 # largefiles, we should just bail here and let super
117 117 # handle it -- thus gaining a big performance boost.
118 118 lfdirstate = lfutil.openlfdirstate(ui, self)
119 119 if match.files() and not match.anypats():
120 120 for f in lfdirstate:
121 121 if match(f):
122 122 break
123 123 else:
124 124 return orig(node1, node2, match, listignored, listclean,
125 125 listunknown, listsubrepos)
126 126
127 127 # Create a copy of match that matches standins instead
128 128 # of largefiles.
129 129 def tostandins(files):
130 130 if not working:
131 131 return files
132 132 newfiles = []
133 133 dirstate = self.dirstate
134 134 for f in files:
135 135 sf = lfutil.standin(f)
136 136 if sf in dirstate:
137 137 newfiles.append(sf)
138 138 elif sf in dirstate.dirs():
139 139 # Directory entries could be regular or
140 140 # standin, check both
141 141 newfiles.extend((f, sf))
142 142 else:
143 143 newfiles.append(f)
144 144 return newfiles
145 145
146 146 m = copy.copy(match)
147 147 m._files = tostandins(m._files)
148 148
149 149 result = orig(node1, node2, m, ignored, clean, unknown,
150 150 listsubrepos)
151 151 if working:
152 152
153 153 def sfindirstate(f):
154 154 sf = lfutil.standin(f)
155 155 dirstate = self.dirstate
156 156 return sf in dirstate or sf in dirstate.dirs()
157 157
158 158 match._files = [f for f in match._files
159 159 if sfindirstate(f)]
160 160 # Don't waste time getting the ignored and unknown
161 161 # files from lfdirstate
162 162 unsure, s = lfdirstate.status(match, [], False, listclean,
163 163 False)
164 164 (modified, added, removed, missing, _unknown, _ignored,
165 165 clean) = s
166 166 if parentworking:
167 167 for lfile in unsure:
168 168 standin = lfutil.standin(lfile)
169 169 if standin not in ctx1:
170 170 # from second parent
171 171 modified.append(lfile)
172 172 elif ctx1[standin].data().strip() \
173 173 != lfutil.hashfile(self.wjoin(lfile)):
174 174 modified.append(lfile)
175 175 else:
176 176 if listclean:
177 177 clean.append(lfile)
178 178 lfdirstate.normal(lfile)
179 179 else:
180 180 tocheck = unsure + modified + added + clean
181 181 modified, added, clean = [], [], []
182 182
183 183 for lfile in tocheck:
184 184 standin = lfutil.standin(lfile)
185 185 if inctx(standin, ctx1):
186 186 if ctx1[standin].data().strip() != \
187 187 lfutil.hashfile(self.wjoin(lfile)):
188 188 modified.append(lfile)
189 189 elif listclean:
190 190 clean.append(lfile)
191 191 else:
192 192 added.append(lfile)
193 193
194 194 # Standins no longer found in lfdirstate has been
195 195 # removed
196 196 for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
197 197 lfile = lfutil.splitstandin(standin)
198 198 if not match(lfile):
199 199 continue
200 200 if lfile not in lfdirstate:
201 201 removed.append(lfile)
202 202
203 203 # Filter result lists
204 204 result = list(result)
205 205
206 206 # Largefiles are not really removed when they're
207 207 # still in the normal dirstate. Likewise, normal
208 208 # files are not really removed if they are still in
209 209 # lfdirstate. This happens in merges where files
210 210 # change type.
211 211 removed = [f for f in removed
212 212 if f not in self.dirstate]
213 213 result[2] = [f for f in result[2]
214 214 if f not in lfdirstate]
215 215
216 216 lfiles = set(lfdirstate._map)
217 217 # Unknown files
218 218 result[4] = set(result[4]).difference(lfiles)
219 219 # Ignored files
220 220 result[5] = set(result[5]).difference(lfiles)
221 221 # combine normal files and largefiles
222 222 normals = [[fn for fn in filelist
223 223 if not lfutil.isstandin(fn)]
224 224 for filelist in result]
225 225 lfstatus = (modified, added, removed, missing, [], [],
226 226 clean)
227 227 result = [sorted(list1 + list2)
228 228 for (list1, list2) in zip(normals, lfstatus)]
229 229 else:
230 230 def toname(f):
231 231 if lfutil.isstandin(f):
232 232 return lfutil.splitstandin(f)
233 233 return f
234 234 result = [[toname(f) for f in items]
235 235 for items in result]
236 236
237 237 if wlock:
238 238 lfdirstate.write()
239 239
240 240 finally:
241 241 if wlock:
242 242 wlock.release()
243 243
244 244 self.lfstatus = True
245 return result
245 return scmutil.status(*result)
246 246
247 247 # As part of committing, copy all of the largefiles into the
248 248 # cache.
249 249 def commitctx(self, *args, **kwargs):
250 250 node = super(lfilesrepo, self).commitctx(*args, **kwargs)
251 251 lfutil.copyalltostore(self, node)
252 252 return node
253 253
254 254 # Before commit, largefile standins have not had their
255 255 # contents updated to reflect the hash of their largefile.
256 256 # Do that here.
257 257 def commit(self, text="", user=None, date=None, match=None,
258 258 force=False, editor=False, extra={}):
259 259 orig = super(lfilesrepo, self).commit
260 260
261 261 wlock = self.wlock()
262 262 try:
263 263 # Case 0: Automated committing
264 264 #
265 265 # While automated committing (like rebase, transplant
266 266 # and so on), this code path is used to avoid:
267 267 # (1) updating standins, because standins should
268 268 # be already updated at this point
269 269 # (2) aborting when stadnins are matched by "match",
270 270 # because automated committing may specify them directly
271 271 #
272 272 if getattr(self, "_isrebasing", False) or \
273 273 getattr(self, "_istransplanting", False):
274 274 result = orig(text=text, user=user, date=date, match=match,
275 275 force=force, editor=editor, extra=extra)
276 276
277 277 if result:
278 278 lfdirstate = lfutil.openlfdirstate(ui, self)
279 279 for f in self[result].files():
280 280 if lfutil.isstandin(f):
281 281 lfile = lfutil.splitstandin(f)
282 282 lfutil.synclfdirstate(self, lfdirstate, lfile,
283 283 False)
284 284 lfdirstate.write()
285 285
286 286 return result
287 287 # Case 1: user calls commit with no specific files or
288 288 # include/exclude patterns: refresh and commit all files that
289 289 # are "dirty".
290 290 if ((match is None) or
291 291 (not match.anypats() and not match.files())):
292 292 # Spend a bit of time here to get a list of files we know
293 293 # are modified so we can compare only against those.
294 294 # It can cost a lot of time (several seconds)
295 295 # otherwise to update all standins if the largefiles are
296 296 # large.
297 297 lfdirstate = lfutil.openlfdirstate(ui, self)
298 298 dirtymatch = match_.always(self.root, self.getcwd())
299 299 unsure, s = lfdirstate.status(dirtymatch, [], False, False,
300 300 False)
301 301 modified, added, removed = s[:3]
302 302 modifiedfiles = unsure + modified + added + removed
303 303 lfiles = lfutil.listlfiles(self)
304 304 # this only loops through largefiles that exist (not
305 305 # removed/renamed)
306 306 for lfile in lfiles:
307 307 if lfile in modifiedfiles:
308 308 if os.path.exists(
309 309 self.wjoin(lfutil.standin(lfile))):
310 310 # this handles the case where a rebase is being
311 311 # performed and the working copy is not updated
312 312 # yet.
313 313 if os.path.exists(self.wjoin(lfile)):
314 314 lfutil.updatestandin(self,
315 315 lfutil.standin(lfile))
316 316 lfdirstate.normal(lfile)
317 317
318 318 result = orig(text=text, user=user, date=date, match=match,
319 319 force=force, editor=editor, extra=extra)
320 320
321 321 if result is not None:
322 322 for lfile in lfdirstate:
323 323 if lfile in modifiedfiles:
324 324 if (not os.path.exists(self.wjoin(
325 325 lfutil.standin(lfile)))) or \
326 326 (not os.path.exists(self.wjoin(lfile))):
327 327 lfdirstate.drop(lfile)
328 328
329 329 # This needs to be after commit; otherwise precommit hooks
330 330 # get the wrong status
331 331 lfdirstate.write()
332 332 return result
333 333
334 334 lfiles = lfutil.listlfiles(self)
335 335 match._files = self._subdirlfs(match.files(), lfiles)
336 336
337 337 # Case 2: user calls commit with specified patterns: refresh
338 338 # any matching big files.
339 339 smatcher = lfutil.composestandinmatcher(self, match)
340 340 standins = self.dirstate.walk(smatcher, [], False, False)
341 341
342 342 # No matching big files: get out of the way and pass control to
343 343 # the usual commit() method.
344 344 if not standins:
345 345 return orig(text=text, user=user, date=date, match=match,
346 346 force=force, editor=editor, extra=extra)
347 347
348 348 # Refresh all matching big files. It's possible that the
349 349 # commit will end up failing, in which case the big files will
350 350 # stay refreshed. No harm done: the user modified them and
351 351 # asked to commit them, so sooner or later we're going to
352 352 # refresh the standins. Might as well leave them refreshed.
353 353 lfdirstate = lfutil.openlfdirstate(ui, self)
354 354 for standin in standins:
355 355 lfile = lfutil.splitstandin(standin)
356 356 if lfdirstate[lfile] != 'r':
357 357 lfutil.updatestandin(self, standin)
358 358 lfdirstate.normal(lfile)
359 359 else:
360 360 lfdirstate.drop(lfile)
361 361
362 362 # Cook up a new matcher that only matches regular files or
363 363 # standins corresponding to the big files requested by the
364 364 # user. Have to modify _files to prevent commit() from
365 365 # complaining "not tracked" for big files.
366 366 match = copy.copy(match)
367 367 origmatchfn = match.matchfn
368 368
369 369 # Check both the list of largefiles and the list of
370 370 # standins because if a largefile was removed, it
371 371 # won't be in the list of largefiles at this point
372 372 match._files += sorted(standins)
373 373
374 374 actualfiles = []
375 375 for f in match._files:
376 376 fstandin = lfutil.standin(f)
377 377
378 378 # ignore known largefiles and standins
379 379 if f in lfiles or fstandin in standins:
380 380 continue
381 381
382 382 actualfiles.append(f)
383 383 match._files = actualfiles
384 384
385 385 def matchfn(f):
386 386 if origmatchfn(f):
387 387 return f not in lfiles
388 388 else:
389 389 return f in standins
390 390
391 391 match.matchfn = matchfn
392 392 result = orig(text=text, user=user, date=date, match=match,
393 393 force=force, editor=editor, extra=extra)
394 394 # This needs to be after commit; otherwise precommit hooks
395 395 # get the wrong status
396 396 lfdirstate.write()
397 397 return result
398 398 finally:
399 399 wlock.release()
400 400
401 401 def push(self, remote, force=False, revs=None, newbranch=False):
402 402 if remote.local():
403 403 missing = set(self.requirements) - remote.local().supported
404 404 if missing:
405 405 msg = _("required features are not"
406 406 " supported in the destination:"
407 407 " %s") % (', '.join(sorted(missing)))
408 408 raise util.Abort(msg)
409 409 return super(lfilesrepo, self).push(remote, force=force, revs=revs,
410 410 newbranch=newbranch)
411 411
    def _subdirlfs(self, files, lfiles):
        '''
        Adjust matched file list
        If we pass a directory to commit whose only commitable files
        are largefiles, the core commit code aborts before finding
        the largefiles.
        So we do the following:
        For directories that only have largefiles as matches,
        we explicitly add the largefiles to the match list and remove
        the directory.
        In other cases, we leave the match list unmodified.

        files: the file names produced by the match
        lfiles: the repository's known largefile names
        returns: the adjusted list of file names to hand to commit
        '''
        actualfiles = []
        dirs = []
        regulars = []

        # Split the match into directories and plain files; naming a
        # standin path directly is a user error.
        for f in files:
            if lfutil.isstandin(f + '/'):
                raise util.Abort(
                    _('file "%s" is a largefile standin') % f,
                    hint=('commit the largefile itself instead'))
            # Scan directories
            if os.path.isdir(self.wjoin(f)):
                dirs.append(f)
            else:
                regulars.append(f)

        for f in dirs:
            matcheddir = False
            d = self.dirstate.normalize(f) + '/'
            # Check for matched normal files
            for mf in regulars:
                if self.dirstate.normalize(mf).startswith(d):
                    # a normal file lives under d: keep the directory
                    # itself so the core matcher handles it
                    actualfiles.append(f)
                    matcheddir = True
                    break
            if not matcheddir:
                # If no normal match, manually append
                # any matching largefiles
                for lf in lfiles:
                    if self.dirstate.normalize(lf).startswith(d):
                        actualfiles.append(lf)
                        # on the first largefile hit, also add the
                        # directory's standin (once)
                        if not matcheddir:
                            actualfiles.append(lfutil.standin(f))
                            matcheddir = True
            # Nothing in dir, so readd it
            # and let commit reject it
            if not matcheddir:
                actualfiles.append(f)

        # Always add normal files
        actualfiles += regulars
        return actualfiles
465 465
466 466 repo.__class__ = lfilesrepo
467 467
468 468 def prepushoutgoinghook(local, remote, outgoing):
469 469 if outgoing.missing:
470 470 toupload = set()
471 471 addfunc = lambda fn, lfhash: toupload.add(lfhash)
472 472 lfutil.getlfilestoupload(local, outgoing.missing, addfunc)
473 473 lfcommands.uploadlfiles(ui, local, remote, toupload)
474 474 repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
475 475
    def checkrequireslfiles(ui, repo, **kwargs):
        # Hook body: if the store contains any file under the standin
        # directory (lfutil.shortname + '/'), record the 'largefiles'
        # requirement so clients without the extension refuse the repo.
        if 'largefiles' not in repo.requirements and util.any(
            lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
            repo.requirements.add('largefiles')
            repo._writerequirements()
481 481
482 482 ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
483 483 'largefiles')
484 484 ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
@@ -1,1710 +1,1710 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 class basectx(object):
21 21 """A basectx object represents the common logic for its children:
22 22 changectx: read-only context that is already present in the repo,
23 23 workingctx: a context that represents the working directory and can
24 24 be committed,
25 25 memctx: a context that represents changes in-memory and can also
26 26 be committed."""
27 27 def __new__(cls, repo, changeid='', *args, **kwargs):
28 28 if isinstance(changeid, basectx):
29 29 return changeid
30 30
31 31 o = super(basectx, cls).__new__(cls)
32 32
33 33 o._repo = repo
34 34 o._rev = nullrev
35 35 o._node = nullid
36 36
37 37 return o
38 38
39 39 def __str__(self):
40 40 return short(self.node())
41 41
42 42 def __int__(self):
43 43 return self.rev()
44 44
45 45 def __repr__(self):
46 46 return "<%s %s>" % (type(self).__name__, str(self))
47 47
48 48 def __eq__(self, other):
49 49 try:
50 50 return type(self) == type(other) and self._rev == other._rev
51 51 except AttributeError:
52 52 return False
53 53
54 54 def __ne__(self, other):
55 55 return not (self == other)
56 56
57 57 def __contains__(self, key):
58 58 return key in self._manifest
59 59
60 60 def __getitem__(self, key):
61 61 return self.filectx(key)
62 62
63 63 def __iter__(self):
64 64 for f in sorted(self._manifest):
65 65 yield f
66 66
    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        # fast path: nothing to filter
        if match.always():
            return self.manifest().copy()

        # exact file list (no patterns, every name present): intersect
        # directly instead of scanning the whole manifest
        files = match.files()
        if (match.matchfn == match.exact or
            (not match.anypats() and util.all(fn in self for fn in files))):
            return self.manifest().intersectfiles(files)

        # slow path: copy, then drop every entry the matcher rejects
        mf = self.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf
87 87
    def _matchstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """return *match*, or an always-matcher when match is None

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())
96 96
97 97 def _prestatus(self, other, s, match, listignored, listclean, listunknown):
98 98 """provide a hook to allow child objects to preprocess status results
99 99
100 100 For example, this allows other contexts, such as workingctx, to query
101 101 the dirstate before comparing the manifests.
102 102 """
103 103 # load earliest manifest first for caching reasons
104 104 if self.rev() < other.rev():
105 105 self.manifest()
106 106 return s
107 107
108 108 def _poststatus(self, other, s, match, listignored, listclean, listunknown):
109 109 """provide a hook to allow child objects to postprocess status results
110 110
111 111 For example, this allows other contexts, such as workingctx, to filter
112 112 suspect symlinks in the case of FAT32 and NTFS filesytems.
113 113 """
114 114 return s
115 115
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # mf1 is the "base" side (other), mf2 is this context's side,
        # both restricted to the matcher
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added, clean = [], [], []
        # deleted/unknown/ignored come pre-computed in s (see _prestatus)
        deleted, unknown, ignored = s[3], s[4], s[5]
        # only files flagged ('x'/'l') on either side need a flag check
        withflags = mf1.withflags() | mf2.withflags()
        for fn, mf2node in mf2.iteritems():
            if fn in mf1:
                # modified when flags changed, or node changed; a falsy
                # mf2node (presumably a working-directory entry) forces a
                # content comparison via cmp()
                if (fn not in deleted and
                    ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                     (mf1[fn] != mf2node and
                      (mf2node or self[fn].cmp(other[fn]))))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                # drop handled entries; what remains in mf1 is "removed"
                del mf1[fn]
            elif fn not in deleted:
                added.append(fn)
        removed = mf1.keys()
        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]

        return [modified, added, removed, deleted, unknown, ignored, clean]
144 144
145 145 @propertycache
146 146 def substate(self):
147 147 return subrepo.state(self, self._repo.ui)
148 148
149 149 def subrev(self, subpath):
150 150 return self.substate[subpath][1]
151 151
152 152 def rev(self):
153 153 return self._rev
154 154 def node(self):
155 155 return self._node
156 156 def hex(self):
157 157 return hex(self.node())
158 158 def manifest(self):
159 159 return self._manifest
160 160 def phasestr(self):
161 161 return phases.phasenames[self.phase()]
162 162 def mutable(self):
163 163 return self.phase() > phases.public
164 164
165 165 def getfileset(self, expr):
166 166 return fileset.getfileset(self, expr)
167 167
168 168 def obsolete(self):
169 169 """True if the changeset is obsolete"""
170 170 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
171 171
172 172 def extinct(self):
173 173 """True if the changeset is extinct"""
174 174 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
175 175
176 176 def unstable(self):
177 177 """True if the changeset is not obsolete but it's ancestor are"""
178 178 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
179 179
180 180 def bumped(self):
181 181 """True if the changeset try to be a successor of a public changeset
182 182
183 183 Only non-public and non-obsolete changesets may be bumped.
184 184 """
185 185 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
186 186
187 187 def divergent(self):
188 188 """Is a successors of a changeset with multiple possible successors set
189 189
190 190 Only non-public and non-obsolete changesets may be divergent.
191 191 """
192 192 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
193 193
194 194 def troubled(self):
195 195 """True if the changeset is either unstable, bumped or divergent"""
196 196 return self.unstable() or self.bumped() or self.divergent()
197 197
198 198 def troubles(self):
199 199 """return the list of troubles affecting this changesets.
200 200
201 201 Troubles are returned as strings. possible values are:
202 202 - unstable,
203 203 - bumped,
204 204 - divergent.
205 205 """
206 206 troubles = []
207 207 if self.unstable():
208 208 troubles.append('unstable')
209 209 if self.bumped():
210 210 troubles.append('bumped')
211 211 if self.divergent():
212 212 troubles.append('divergent')
213 213 return troubles
214 214
215 215 def parents(self):
216 216 """return contexts for each parent changeset"""
217 217 return self._parents
218 218
219 219 def p1(self):
220 220 return self._parents[0]
221 221
222 222 def p2(self):
223 223 if len(self._parents) == 2:
224 224 return self._parents[1]
225 225 return changectx(self._repo, -1)
226 226
227 227 def _fileinfo(self, path):
228 228 if '_manifest' in self.__dict__:
229 229 try:
230 230 return self._manifest[path], self._manifest.flags(path)
231 231 except KeyError:
232 232 raise error.ManifestLookupError(self._node, path,
233 233 _('not found in manifest'))
234 234 if '_manifestdelta' in self.__dict__ or path in self.files():
235 235 if path in self._manifestdelta:
236 236 return (self._manifestdelta[path],
237 237 self._manifestdelta.flags(path))
238 238 node, flag = self._repo.manifest.find(self._changeset[0], path)
239 239 if not node:
240 240 raise error.ManifestLookupError(self._node, path,
241 241 _('not found in manifest'))
242 242
243 243 return node, flag
244 244
245 245 def filenode(self, path):
246 246 return self._fileinfo(path)[0]
247 247
248 248 def flags(self, path):
249 249 try:
250 250 return self._fileinfo(path)[1]
251 251 except error.LookupError:
252 252 return ''
253 253
254 254 def sub(self, path):
255 255 return subrepo.subrepo(self, path)
256 256
257 257 def match(self, pats=[], include=None, exclude=None, default='glob'):
258 258 r = self._repo
259 259 return matchmod.match(r.root, r.getcwd(), pats,
260 260 include, exclude, default,
261 261 auditor=r.auditor, ctx=self)
262 262
263 263 def diff(self, ctx2=None, match=None, **opts):
264 264 """Returns a diff generator for the given contexts and matcher"""
265 265 if ctx2 is None:
266 266 ctx2 = self.p1()
267 267 if ctx2 is not None:
268 268 ctx2 = self._repo[ctx2]
269 269 diffopts = patch.diffopts(self._repo.ui, opts)
270 270 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
271 271
272 272 @propertycache
273 273 def _dirs(self):
274 274 return scmutil.dirs(self._manifest)
275 275
276 276 def dirs(self):
277 277 return self._dirs
278 278
279 279 def dirty(self, missing=False, merge=True, branch=True):
280 280 return False
281 281
    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # NOTE(review): 'reversed' shadows the builtin of the same name.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        # seven empty lists: modified, added, removed, deleted, unknown,
        # ignored, clean -- filled in by the hook pipeline below
        r = [[], [], [], [], [], [], []]
        match = ctx2._matchstatus(ctx1, r, match, listignored, listclean,
                                  listunknown)
        r = ctx2._prestatus(ctx1, r, match, listignored, listclean, listunknown)
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)
        r = ctx2._poststatus(ctx1, r, match, listignored, listclean,
                             listunknown)

        if reversed:
            # reverse added and removed
            r[1], r[2] = r[2], r[1]

        if listsubrepos:
            # fold each subrepo's status into the result, prefixing
            # file names with the subrepo path
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        # wrap the seven lists in scmutil's status class (replaces the
        # bare tuple previously returned; callers treating the result as
        # a tuple keep working)
        return scmutil.status(*r)
345 345
346 346
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build an in-memory changeset context whose file contents are
    served out of *store* (store.getfile(path) -> (data, mode, copied))."""
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            # file was removed in the store
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
362 362
363 363 class changectx(basectx):
364 364 """A changecontext object makes access to data related to a particular
365 365 changeset convenient. It represents a read-only context already present in
366 366 the repo."""
367 367 def __init__(self, repo, changeid=''):
368 368 """changeid is a revision number, node, or tag"""
369 369
370 370 # since basectx.__new__ already took care of copying the object, we
371 371 # don't need to do anything in __init__, so we just exit here
372 372 if isinstance(changeid, basectx):
373 373 return
374 374
375 375 if changeid == '':
376 376 changeid = '.'
377 377 self._repo = repo
378 378
379 379 if isinstance(changeid, int):
380 380 try:
381 381 self._node = repo.changelog.node(changeid)
382 382 except IndexError:
383 383 raise error.RepoLookupError(
384 384 _("unknown revision '%s'") % changeid)
385 385 self._rev = changeid
386 386 return
387 387 if isinstance(changeid, long):
388 388 changeid = str(changeid)
389 389 if changeid == '.':
390 390 self._node = repo.dirstate.p1()
391 391 self._rev = repo.changelog.rev(self._node)
392 392 return
393 393 if changeid == 'null':
394 394 self._node = nullid
395 395 self._rev = nullrev
396 396 return
397 397 if changeid == 'tip':
398 398 self._node = repo.changelog.tip()
399 399 self._rev = repo.changelog.rev(self._node)
400 400 return
401 401 if len(changeid) == 20:
402 402 try:
403 403 self._node = changeid
404 404 self._rev = repo.changelog.rev(changeid)
405 405 return
406 406 except LookupError:
407 407 pass
408 408
409 409 try:
410 410 r = int(changeid)
411 411 if str(r) != changeid:
412 412 raise ValueError
413 413 l = len(repo.changelog)
414 414 if r < 0:
415 415 r += l
416 416 if r < 0 or r >= l:
417 417 raise ValueError
418 418 self._rev = r
419 419 self._node = repo.changelog.node(r)
420 420 return
421 421 except (ValueError, OverflowError, IndexError):
422 422 pass
423 423
424 424 if len(changeid) == 40:
425 425 try:
426 426 self._node = bin(changeid)
427 427 self._rev = repo.changelog.rev(self._node)
428 428 return
429 429 except (TypeError, LookupError):
430 430 pass
431 431
432 432 if changeid in repo._bookmarks:
433 433 self._node = repo._bookmarks[changeid]
434 434 self._rev = repo.changelog.rev(self._node)
435 435 return
436 436 if changeid in repo._tagscache.tags:
437 437 self._node = repo._tagscache.tags[changeid]
438 438 self._rev = repo.changelog.rev(self._node)
439 439 return
440 440 try:
441 441 self._node = repo.branchtip(changeid)
442 442 self._rev = repo.changelog.rev(self._node)
443 443 return
444 444 except error.RepoLookupError:
445 445 pass
446 446
447 447 self._node = repo.changelog._partialmatch(changeid)
448 448 if self._node is not None:
449 449 self._rev = repo.changelog.rev(self._node)
450 450 return
451 451
452 452 # lookup failed
453 453 # check if it might have come from damaged dirstate
454 454 #
455 455 # XXX we could avoid the unfiltered if we had a recognizable exception
456 456 # for filtered changeset access
457 457 if changeid in repo.unfiltered().dirstate.parents():
458 458 raise error.Abort(_("working directory has unknown parent '%s'!")
459 459 % short(changeid))
460 460 try:
461 461 if len(changeid) == 20:
462 462 changeid = hex(changeid)
463 463 except TypeError:
464 464 pass
465 465 raise error.RepoLookupError(
466 466 _("unknown revision '%s'") % changeid)
467 467
468 468 def __hash__(self):
469 469 try:
470 470 return hash(self._rev)
471 471 except AttributeError:
472 472 return id(self)
473 473
474 474 def __nonzero__(self):
475 475 return self._rev != nullrev
476 476
477 477 @propertycache
478 478 def _changeset(self):
479 479 return self._repo.changelog.read(self.rev())
480 480
481 481 @propertycache
482 482 def _manifest(self):
483 483 return self._repo.manifest.read(self._changeset[0])
484 484
485 485 @propertycache
486 486 def _manifestdelta(self):
487 487 return self._repo.manifest.readdelta(self._changeset[0])
488 488
489 489 @propertycache
490 490 def _parents(self):
491 491 p = self._repo.changelog.parentrevs(self._rev)
492 492 if p[1] == nullrev:
493 493 p = p[:-1]
494 494 return [changectx(self._repo, x) for x in p]
495 495
496 496 def changeset(self):
497 497 return self._changeset
498 498 def manifestnode(self):
499 499 return self._changeset[0]
500 500
501 501 def user(self):
502 502 return self._changeset[1]
503 503 def date(self):
504 504 return self._changeset[2]
505 505 def files(self):
506 506 return self._changeset[3]
507 507 def description(self):
508 508 return self._changeset[4]
509 509 def branch(self):
510 510 return encoding.tolocal(self._changeset[5].get("branch"))
511 511 def closesbranch(self):
512 512 return 'close' in self._changeset[5]
513 513 def extra(self):
514 514 return self._changeset[5]
515 515 def tags(self):
516 516 return self._repo.nodetags(self._node)
517 517 def bookmarks(self):
518 518 return self._repo.nodebookmarks(self._node)
519 519 def phase(self):
520 520 return self._repo._phasecache.phase(self._repo, self._rev)
521 521 def hidden(self):
522 522 return self._rev in repoview.filterrevs(self._repo, 'visible')
523 523
524 524 def children(self):
525 525 """return contexts for each child changeset"""
526 526 c = self._repo.changelog.children(self._node)
527 527 return [changectx(self._repo, x) for x in c]
528 528
529 529 def ancestors(self):
530 530 for a in self._repo.changelog.ancestors([self._rev]):
531 531 yield changectx(self._repo, a)
532 532
533 533 def descendants(self):
534 534 for d in self._repo.changelog.descendants([self._rev]):
535 535 yield changectx(self._repo, d)
536 536
537 537 def filectx(self, path, fileid=None, filelog=None):
538 538 """get a file context from this changeset"""
539 539 if fileid is None:
540 540 fileid = self.filenode(path)
541 541 return filectx(self._repo, path, fileid=fileid,
542 542 changectx=self, filelog=filelog)
543 543
544 544 def ancestor(self, c2, warn=False):
545 545 """return the "best" ancestor context of self and c2
546 546
547 547 If there are multiple candidates, it will show a message and check
548 548 merge.preferancestor configuration before falling back to the
549 549 revlog ancestor."""
550 550 # deal with workingctxs
551 551 n2 = c2._node
552 552 if n2 is None:
553 553 n2 = c2._parents[0]._node
554 554 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
555 555 if not cahs:
556 556 anc = nullid
557 557 elif len(cahs) == 1:
558 558 anc = cahs[0]
559 559 else:
560 560 for r in self._repo.ui.configlist('merge', 'preferancestor'):
561 561 try:
562 562 ctx = changectx(self._repo, r)
563 563 except error.RepoLookupError:
564 564 continue
565 565 anc = ctx.node()
566 566 if anc in cahs:
567 567 break
568 568 else:
569 569 anc = self._repo.changelog.ancestor(self._node, n2)
570 570 if warn:
571 571 self._repo.ui.status(
572 572 (_("note: using %s as ancestor of %s and %s\n") %
573 573 (short(anc), short(self._node), short(n2))) +
574 574 ''.join(_(" alternatively, use --config "
575 575 "merge.preferancestor=%s\n") %
576 576 short(n) for n in sorted(cahs) if n != anc))
577 577 return changectx(self._repo, anc)
578 578
579 579 def descendant(self, other):
580 580 """True if other is descendant of this changeset"""
581 581 return self._repo.changelog.descendant(self._rev, other._rev)
582 582
583 583 def walk(self, match):
584 584 fset = set(match.files())
585 585 # for dirstate.walk, files=['.'] means "walk the whole tree".
586 586 # follow that here, too
587 587 fset.discard('.')
588 588
589 589 # avoid the entire walk if we're only looking for specific files
590 590 if fset and not match.anypats():
591 591 if util.all([fn in self for fn in fset]):
592 592 for fn in sorted(fset):
593 593 if match(fn):
594 594 yield fn
595 595 raise StopIteration
596 596
597 597 for fn in self:
598 598 if fn in fset:
599 599 # specified pattern is the exact name
600 600 fset.remove(fn)
601 601 if match(fn):
602 602 yield fn
603 603 for fn in sorted(fset):
604 604 if fn in self._dirs:
605 605 # specified pattern is a directory
606 606 continue
607 607 match.bad(fn, _('no such file in rev %s') % self)
608 608
609 609 def matches(self, match):
610 610 return self.walk(match)
611 611
612 612 class basefilectx(object):
613 613 """A filecontext object represents the common logic for its children:
614 614 filectx: read-only access to a filerevision that is already present
615 615 in the repo,
616 616 workingfilectx: a filecontext that represents files from the working
617 617 directory,
618 618 memfilectx: a filecontext that represents files in-memory."""
619 619 def __new__(cls, repo, path, *args, **kwargs):
620 620 return super(basefilectx, cls).__new__(cls)
621 621
622 622 @propertycache
623 623 def _filelog(self):
624 624 return self._repo.file(self._path)
625 625
626 626 @propertycache
627 627 def _changeid(self):
628 628 if '_changeid' in self.__dict__:
629 629 return self._changeid
630 630 elif '_changectx' in self.__dict__:
631 631 return self._changectx.rev()
632 632 else:
633 633 return self._filelog.linkrev(self._filerev)
634 634
635 635 @propertycache
636 636 def _filenode(self):
637 637 if '_fileid' in self.__dict__:
638 638 return self._filelog.lookup(self._fileid)
639 639 else:
640 640 return self._changectx.filenode(self._path)
641 641
642 642 @propertycache
643 643 def _filerev(self):
644 644 return self._filelog.rev(self._filenode)
645 645
646 646 @propertycache
647 647 def _repopath(self):
648 648 return self._path
649 649
650 650 def __nonzero__(self):
651 651 try:
652 652 self._filenode
653 653 return True
654 654 except error.LookupError:
655 655 # file is missing
656 656 return False
657 657
658 658 def __str__(self):
659 659 return "%s@%s" % (self.path(), self._changectx)
660 660
661 661 def __repr__(self):
662 662 return "<%s %s>" % (type(self).__name__, str(self))
663 663
664 664 def __hash__(self):
665 665 try:
666 666 return hash((self._path, self._filenode))
667 667 except AttributeError:
668 668 return id(self)
669 669
670 670 def __eq__(self, other):
671 671 try:
672 672 return (type(self) == type(other) and self._path == other._path
673 673 and self._filenode == other._filenode)
674 674 except AttributeError:
675 675 return False
676 676
677 677 def __ne__(self, other):
678 678 return not (self == other)
679 679
680 680 def filerev(self):
681 681 return self._filerev
682 682 def filenode(self):
683 683 return self._filenode
684 684 def flags(self):
685 685 return self._changectx.flags(self._path)
686 686 def filelog(self):
687 687 return self._filelog
688 688 def rev(self):
689 689 return self._changeid
690 690 def linkrev(self):
691 691 return self._filelog.linkrev(self._filerev)
692 692 def node(self):
693 693 return self._changectx.node()
694 694 def hex(self):
695 695 return self._changectx.hex()
696 696 def user(self):
697 697 return self._changectx.user()
698 698 def date(self):
699 699 return self._changectx.date()
700 700 def files(self):
701 701 return self._changectx.files()
702 702 def description(self):
703 703 return self._changectx.description()
704 704 def branch(self):
705 705 return self._changectx.branch()
706 706 def extra(self):
707 707 return self._changectx.extra()
708 708 def phase(self):
709 709 return self._changectx.phase()
710 710 def phasestr(self):
711 711 return self._changectx.phasestr()
712 712 def manifest(self):
713 713 return self._changectx.manifest()
714 714 def changectx(self):
715 715 return self._changectx
716 716
717 717 def path(self):
718 718 return self._path
719 719
720 720 def isbinary(self):
721 721 try:
722 722 return util.binary(self.data())
723 723 except IOError:
724 724 return False
725 725 def isexec(self):
726 726 return 'x' in self.flags()
727 727 def islink(self):
728 728 return 'l' in self.flags()
729 729
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # Only run the (expensive) filelog content comparison when the
        # recorded sizes allow the contents to be equal:
        #  - fctx has no filerev (presumably a working-directory file --
        #    TODO confirm) and either encode filters are active, making
        #    sizes incomparable, or sizes match modulo the metadata prefix
        #  - or the sizes are simply equal
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        # sizes differ, so the contents must differ
        return True
744 744
745 745 def parents(self):
746 746 _path = self._path
747 747 fl = self._filelog
748 748 pl = [(_path, n, fl) for n in self._filelog.parents(self._filenode)]
749 749
750 750 r = self._filelog.renamed(self._filenode)
751 751 if r:
752 752 pl[0] = (r[0], r[1], None)
753 753
754 754 return [filectx(self._repo, p, fileid=n, filelog=l)
755 755 for p, n, l in pl if n != nullid]
756 756
757 757 def p1(self):
758 758 return self.parents()[0]
759 759
760 760 def p2(self):
761 761 p = self.parents()
762 762 if len(p) == 2:
763 763 return p[1]
764 764 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
765 765
766 766 def annotate(self, follow=False, linenumber=None, diffopts=None):
767 767 '''returns a list of tuples of (ctx, line) for each line
768 768 in the file, where ctx is the filectx of the node where
769 769 that line was last changed.
770 770 This returns tuples of ((ctx, linenumber), line) for each line,
771 771 if "linenumber" parameter is NOT "None".
772 772 In such tuples, linenumber means one at the first appearance
773 773 in the managed file.
774 774 To reduce annotation cost,
775 775 this returns fixed value(False is used) as linenumber,
776 776 if "linenumber" parameter is "False".'''
777 777
778 778 if linenumber is None:
779 779 def decorate(text, rev):
780 780 return ([rev] * len(text.splitlines()), text)
781 781 elif linenumber:
782 782 def decorate(text, rev):
783 783 size = len(text.splitlines())
784 784 return ([(rev, i) for i in xrange(1, size + 1)], text)
785 785 else:
786 786 def decorate(text, rev):
787 787 return ([(rev, False)] * len(text.splitlines()), text)
788 788
789 789 def pair(parent, child):
790 790 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
791 791 refine=True)
792 792 for (a1, a2, b1, b2), t in blocks:
793 793 # Changed blocks ('!') or blocks made only of blank lines ('~')
794 794 # belong to the child.
795 795 if t == '=':
796 796 child[0][b1:b2] = parent[0][a1:a2]
797 797 return child
798 798
799 799 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
800 800
801 801 def parents(f):
802 802 pl = f.parents()
803 803
804 804 # Don't return renamed parents if we aren't following.
805 805 if not follow:
806 806 pl = [p for p in pl if p.path() == f.path()]
807 807
808 808 # renamed filectx won't have a filelog yet, so set it
809 809 # from the cache to save time
810 810 for p in pl:
811 811 if not '_filelog' in p.__dict__:
812 812 p._filelog = getlog(p.path())
813 813
814 814 return pl
815 815
816 816 # use linkrev to find the first changeset where self appeared
817 817 if self.rev() != self.linkrev():
818 818 base = self.filectx(self.filenode())
819 819 else:
820 820 base = self
821 821
822 822 # This algorithm would prefer to be recursive, but Python is a
823 823 # bit recursion-hostile. Instead we do an iterative
824 824 # depth-first search.
825 825
826 826 visit = [base]
827 827 hist = {}
828 828 pcache = {}
829 829 needed = {base: 1}
830 830 while visit:
831 831 f = visit[-1]
832 832 pcached = f in pcache
833 833 if not pcached:
834 834 pcache[f] = parents(f)
835 835
836 836 ready = True
837 837 pl = pcache[f]
838 838 for p in pl:
839 839 if p not in hist:
840 840 ready = False
841 841 visit.append(p)
842 842 if not pcached:
843 843 needed[p] = needed.get(p, 0) + 1
844 844 if ready:
845 845 visit.pop()
846 846 reusable = f in hist
847 847 if reusable:
848 848 curr = hist[f]
849 849 else:
850 850 curr = decorate(f.data(), f)
851 851 for p in pl:
852 852 if not reusable:
853 853 curr = pair(hist[p], curr)
854 854 if needed[p] == 1:
855 855 del hist[p]
856 856 del needed[p]
857 857 else:
858 858 needed[p] -= 1
859 859
860 860 hist[f] = curr
861 861 pcache[f] = []
862 862
863 863 return zip(hist[base][0], hist[base][1].splitlines(True))
864 864
865 865 def ancestors(self, followfirst=False):
866 866 visit = {}
867 867 c = self
868 868 cut = followfirst and 1 or None
869 869 while True:
870 870 for parent in c.parents()[:cut]:
871 871 visit[(parent.rev(), parent.node())] = parent
872 872 if not visit:
873 873 break
874 874 c = visit.pop(max(visit))
875 875 yield c
876 876
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of identifying the revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # attributes left unset here are filled in lazily via propertycache
        # machinery on the base class / below
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.RepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def data(self):
        # raw file contents for this file revision
        return self._filelog.read(self._filenode)
    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # identical file node in a parent means no rename happened
                # in this particular changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # file absent in this parent; keep looking
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
967 967
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # not committed yet, so no revision number or node id
        self._rev = None
        self._node = None
        self._text = text
        # user/date/status fall back to propertycache defaults when not given
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        # e.g. "abcdef012345+" -- the trailing '+' marks uncommitted state
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status"""

        man = self._parents[0].manifest().copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man:
                    return man
                return man2
        else:
            getman = lambda f: man

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        modified, added, removed, deleted = self._status[:4]
        for i, l in (("a", added), ("m", modified)):
            for f in l:
                orig = copied.get(f, f)
                # node id of the copy source (or nullid for new files),
                # suffixed with the status letter as a placeholder
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.set(f, ff(f))
                except OSError:
                    pass

        for f in deleted + removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # modified + added + removed, sorted
        return sorted(self._status[0] + self._status[1] + self._status[2])

    # accessors for the seven status lists, in localrepo.status() order
    def modified(self):
        return self._status[0]
    def added(self):
        return self._status[1]
    def removed(self):
        return self._status[2]
    def deleted(self):
        return self._status[3]
    def unknown(self):
        return self._status[4]
    def ignored(self):
        return self._status[5]
    def clean(self):
        return self._status[6]
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        # union of the parents' tags
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            # manifest already built: read the flag from it
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()
1187 1187
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate over tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # a file is "in" the working context unless untracked or removed
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # not a merge: only one real parent
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the given files for addition; returns rejected paths."""
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but still add: large files are expensive to manage
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, modified or normal -- nothing to do
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        """Stop tracking the given files; returns rejected paths."""
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    # scheduled-for-add files are simply dropped
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        """Restore deleted files from the parent revision(s)."""
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    # take the content from whichever parent has the file
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        """Record that dest was copied from source in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?r':
                    self._repo.dirstate.add(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        # nothing to do if the filesystem supports symlinks natively
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        modified, added, removed = s[0:3]
        for f in modified + added:
            mf[f] = None
            mf.set(f, self.flags(f))
        for f in removed:
            if f in mf:
                del mf[f]
        return mf

    def _prestatus(self, other, s, match, listignored, listclean, listunknown):
        """override the parent hook with a dirstate query

        We use this prestatus hook to populate the status with information from
        the dirstate.
        """
        # doesn't need to call super; if that changes, be aware that super
        # calls self.manifest which would slow down the common case of calling
        # status against a workingctx's parent
        return self._dirstatestatus(match, listignored, listclean, listunknown)

    def _poststatus(self, other, s, match, listignored, listclean, listunknown):
        """override the parent hook with a filter for suspect symlinks

        We use this poststatus hook to filter out symlinks that might have
        accidentally ended up with the entire contents of the file they are
        susposed to be linking to.
        """
        s[0] = self._filtersuspectsymlink(s[0])
        self._status = s[:]
        return s

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)
        modified, added, removed, deleted, unknown, ignored, clean = s

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            modified += modified2

            # update dirstate for files that are actually clean
            if fixup and listclean:
                clean += fixup

        return [modified, added, removed, deleted, unknown, ignored, clean]

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, s, match, listignored, listclean,
                                       listunknown)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def status(self, other='.', match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        # yet to be determined: what to do if 'other' is a 'workingctx' or a
        # 'memctx'?
        s = super(workingctx, self).status(other, match, listignored, listclean,
                                           listunknown, listsubrepos)
        # calling 'super' subtly reveresed the contexts, so we flip the results
        # (s[1] is 'added' and s[2] is 'removed')
        s = list(s)
        s[1], s[2] = s[2], s[1]
        # wrap the seven lists in the scmutil.status class (r22914)
        return scmutil.status(*s)
1486 1486
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not committed yet: no changeset id or file revision/node
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # node of path in ctx's manifest, or nullid if absent
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # first parent is the copy source: (srcpath, srcnode, no filelog)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in which the file does not exist (node == nullid)
        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1527 1527
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # read the file straight from the working directory
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        # (copy source, its node in the first parent's manifest)
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            # prefer the on-disk mtime, keeping the context's timezone
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the context's date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1573 1573
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # map missing parents to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        # everything is reported as "modified"; the other lists stay empty
        self._status = [files, [], [], [], []]
        self._filectxfn = filectxfn
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            # let the user edit the message, and persist it for recovery
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f, fnode in man.iteritems():
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
                if len(p) > 1:
                    p2node = p[1].node()
            # recompute the file node from in-memory data and parent nodes
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        return man
1671 1671
1672 1672
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and commitablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # encode the link/exec bits manifest-style: 'l', 'x', 'lx' or ''
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def size(self):
        """Return the length of the content."""
        return len(self.data())

    def flags(self):
        """Return the manifest-style flag string."""
        return self._flags

    def renamed(self):
        """Return (source path, nullid) if this file was copied, else None."""
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite

        Note: the flags argument is accepted for interface compatibility
        but is not stored here."""
        self._data = data
@@ -1,1595 +1,1596 b''
1 1 # subrepo.py - sub-repository handling for Mercurial
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import errno, os, re, shutil, posixpath, sys
9 9 import xml.dom.minidom
10 10 import stat, subprocess, tarfile
11 11 from i18n import _
12 import config, util, node, error, cmdutil, match as matchmod
12 import config, util, node, error, cmdutil, scmutil, match as matchmod
13 13 import phases
14 14 import pathutil
15 15 import exchange
# set lazily by subrepo() below to break a circular import at load time
hg = None
propertycache = util.propertycache

# (source, revision, kind) placeholder for a subrepo with no recorded state
nullstate = ('', '', 'empty')
20 20
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expanded = util.urllocalpath(util.expandpath(path))
    url = util.url(expanded)
    if url.scheme:
        # a real URL (scheme present) is returned untouched
        return path
    return util.normpath(os.path.abspath(url.path))
30 30
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    digest = util.sha1(_expandedabspath(remotepath)).hexdigest()
    return digest[:12]
34 34
def _calcfilehash(filename):
    '''return the sha1 hex digest of filename's contents

    A missing file hashes the same as an empty one.
    '''
    if not os.path.exists(filename):
        return util.sha1('').hexdigest()
    fp = open(filename, 'rb')
    try:
        content = fp.read()
    finally:
        fp.close()
    return util.sha1(content).hexdigest()
44 44
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kw):
        error.Abort.__init__(self, *args, **kw)
        # relative path of the subrepo the error originated in (or None)
        self.subrepo = kw.get('subrepo')
        # original sys.exc_info() triple captured by annotatesubrepoerror
        self.cause = kw.get('cause')
51 51
def annotatesubrepoerror(func):
    """decorator annotating raised Abort errors with the subrepo path

    Any error.Abort escaping the wrapped method is re-raised as a
    SubrepoAbort that carries the subrepo's relative path and the original
    exception info; an already-annotated SubrepoAbort passes through
    unchanged so the message is only annotated once.
    """
    def decoratedmethod(self, *args, **kargs):
        try:
            res = func(self, *args, **kargs)
        except SubrepoAbort, ex:
            # This exception has already been handled
            raise ex
        except error.Abort, ex:
            subrepo = subrelpath(self)
            errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
        return res
    return decoratedmethod
67 67
def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    def read(f, sections=None, remap=None):
        # parse a spec file from ctx; also used as the %include callback,
        # which is why a missing included file is a hard abort below
        if f in ctx:
            try:
                data = ctx[f].data()
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
                # handle missing subrepo spec files as removed
                ui.warn(_("warning: subrepo spec file %s not found\n") % f)
                return
            p.parse(f, data, sections, remap, read)
        else:
            raise util.Abort(_("subrepo spec file %s not found") % f)

    if '.hgsub' in ctx:
        read('.hgsub')

    # per-user [subpaths] entries override those from .hgsub
    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))

    rev = {}
    if '.hgsubstate' in ctx:
        try:
            # each line is "<revision> <path>"
            for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
                l = l.lstrip()
                if not l:
                    continue
                try:
                    revision, path = l.split(" ", 1)
                except ValueError:
                    raise util.Abort(_("invalid subrepository revision "
                                       "specifier in .hgsubstate line %d")
                                     % (i + 1))
                rev[path] = revision
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise

    def remap(src):
        # apply every [subpaths] pattern rewrite, first match substituted once
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = repl.encode('string-escape')
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error, e:
                raise util.Abort(_("bad subrepository pattern in %s: %s")
                                 % (p.source('subpaths', pattern), e))
        return src

    state = {}
    for path, src in p[''].items():
        kind = 'hg'
        # a "[kind]source" prefix selects a non-hg subrepo type
        if src.startswith('['):
            if ']' not in src:
                raise util.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
            src = src.lstrip() # strip any extra whitespace after ']'

        if not util.url(src).isabs():
            parent = _abssource(ctx._repo, abort=False)
            if parent:
                parent = util.url(parent)
                parent.path = posixpath.join(parent.path or '', src)
                parent.path = posixpath.normpath(parent.path)
                joined = str(parent)
                # Remap the full joined path and use it if it changes,
                # else remap the original source.
                remapped = remap(joined)
                if remapped == joined:
                    src = remap(src)
                else:
                    src = remapped

        src = remap(src)
        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)

    return state
157 157
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    # one "<revision> <path>" line per subrepo, sorted by path
    content = ''.join('%s %s\n' % (state[path][1], path)
                      for path in sorted(state))
    repo.wwrite('.hgsubstate', content, '')
162 162
def submerge(repo, wctx, mctx, actx, overwrite):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context

    Returns the merged substate dict (also written back via writestate).
    """
    if mctx == actx: # backwards?
        actx = wctx.p1()
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}  # merged substate being built

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        # r, when given, is a (source, revision, kind) state tuple
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug("  subrepo %s: %s %s\n" % (s, msg, r))

    # first pass: everything present locally
    for s, l in sorted(s1.iteritems()):
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?'
                      '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                debug(s, "both sides changed")
                srepo = wctx.sub(s)
                option = repo.ui.promptchoice(
                    _(' subrepository %s diverged (local revision: %s, '
                      'remote revision: %s)\n'
                      '(M)erge, keep (l)ocal or keep (r)emote?'
                      '$$ &Merge $$ &Local $$ &Remote')
                    % (s, srepo.shortid(l[1]), srepo.shortid(r[1])), 0)
                if option == 0:
                    wctx.sub(s).merge(r)
                    sm[s] = l
                    debug(s, "merge with", r)
                elif option == 1:
                    sm[s] = l
                    debug(s, "keep local subrepo revision", l)
                else:
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
                    debug(s, "get remote subrepo revision", r)
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            # local changed a subrepo the remote side removed
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    # second pass: subrepos only present on the remote side
    for s, r in sorted(s2.items()):
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0) == 0:
                debug(s, "prompt recreate", r)
                wctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
    return sm
263 263
def _updateprompt(ui, sub, dirty, local, remote):
    """ask the user which of two differing subrepo sources to use

    Returns 0 for local, 1 for remote (ui.promptchoice's answer index).
    """
    if dirty:
        prompt = _(' subrepository sources for %s differ\n'
                   'use (l)ocal source (%s) or (r)emote source (%s)?'
                   '$$ &Local $$ &Remote')
    else:
        prompt = _(' subrepository sources for %s differ (in checked out '
                   'version)\n'
                   'use (l)ocal source (%s) or (r)emote source (%s)?'
                   '$$ &Local $$ &Remote')
    return ui.promptchoice(prompt % (subrelpath(sub), local, remote), 0)
277 277
def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    # climb the _subparent chain to the outermost repository
    outer = repo
    while util.safehasattr(outer, '_subparent'):
        outer = outer._subparent
    return repo.root[len(pathutil.normasprefix(outer.root)):]
284 284
def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    # an explicit _relpath wins; otherwise derive from the backing repo
    if util.safehasattr(sub, '_relpath'):
        return sub._relpath
    if util.safehasattr(sub, '_repo'):
        return reporelpath(sub._repo)
    return sub._path
292 292
def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    if util.safehasattr(repo, '_subparent'):
        # repo is itself a subrepo: resolve its recorded source against
        # the parent's source, recursing toward the top repository
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        if util.safehasattr(repo, '_subtoppath'):
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
        if repo.sharedpath != repo.path:
            # chop off the .hg component to get the default path form
            return os.path.dirname(repo.sharedpath)
    if abort:
        raise util.Abort(_("default path for subrepository not found"))
319 319
def _sanitize(ui, path, ignore):
    """delete potentially hostile 'hgrc' files found under path

    Walks the tree below path, pruning (per directory) the first
    subdirectory whose lowercased name equals ignore, and removes any
    file named 'hgrc' that lives inside a '.hg' directory.
    """
    for dirname, dirs, names in os.walk(path):
        for i, d in enumerate(dirs):
            if d.lower() == ignore:
                # deleting from dirs in place stops os.walk descending into it
                del dirs[i]
                break
        if os.path.basename(dirname).lower() != '.hg':
            continue
        for f in names:
            if f.lower() == 'hgrc':
                ui.warn(_("warning: removing potentially hostile 'hgrc' "
                          "in '%s'\n") % dirname)
                os.unlink(os.path.join(dirname, f))
333 333
def subrepo(ctx, path):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    import hg as h
    hg = h

    # refuse paths that escape the repository (audit before use)
    pathutil.pathauditor(ctx._repo.root)(path)
    state = ctx.substate[path]
    if state[2] not in types:
        raise util.Abort(_('unknown subrepo type %s') % state[2])
    # state[:2] is the (source, revision) pair for the chosen backend
    return types[state[2]](ctx, path, state[:2])
349 349
def newcommitphase(ui, ctx):
    """return the phase to use when committing ctx

    Honours the phases.checksubrepos setting ('ignore', 'follow' or
    'abort'): with 'follow' the commit inherits the least-public phase of
    its subrepo states, with 'abort' a conflicting subrepo phase aborts.
    """
    commitphase = phases.newcommitphase(ui)
    substate = getattr(ctx, "substate", None)
    if not substate:
        return commitphase
    check = ui.config('phases', 'checksubrepos', 'follow')
    if check not in ('ignore', 'follow', 'abort'):
        raise util.Abort(_('invalid phases.checksubrepos configuration: %s')
                         % (check))
    if check == 'ignore':
        return commitphase
    # find the least-public phase among the recorded subrepo states
    maxphase, maxsub = phases.public, None
    for subpath in sorted(substate):
        subphase = ctx.sub(subpath).phase(substate[subpath][1])
        if subphase > maxphase:
            maxphase, maxsub = subphase, subpath
    if commitphase >= maxphase:
        return commitphase
    if check == 'abort':
        raise util.Abort(_("can't commit in %s phase"
                           " conflicting %s from subrepository %s") %
                         (phases.phasenames[commitphase],
                          phases.phasenames[maxphase], maxsub))
    ui.warn(_("warning: changes are committed in"
              " %s phase from subrepository %s\n") %
            (phases.phasenames[maxphase], maxsub))
    return maxphase
380 380
381 381 # subrepo classes need to implement the following abstract class:
382 382
class abstractsubrepo(object):
    """abstract base class defining the subrepo API

    Concrete backends override the methods that make sense for them; the
    defaults here are no-ops, empty results or NotImplementedError.
    """

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate.
        """
        raise NotImplementedError

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        """add matched files; by default nothing is rejected"""
        return []

    def cat(self, ui, match, prefix, **opts):
        """dump matched file contents; 1 is the error exit code"""
        return 1

    def status(self, rev2, **opts):
        """return an (empty by default) status object for the subrepo"""
        return scmutil.status([], [], [], [], [], [], [])

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        """show diffs for the subrepo; no-op by default"""
        pass

    def outgoing(self, ui, dest, opts):
        """list changes not present in dest; 1 means none found"""
        return 1

    def incoming(self, ui, source, opts):
        """list changes available from source; 1 means none found"""
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name):
        """return file data"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def archive(self, ui, archiver, prefix, match=None):
        """add this subrepo's files to archiver; return the file count"""
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0,
                    unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            # map manifest flags to archive permissions / symlink-ness
            mode = 'x' in flags and 0755 or 0644
            symlink = 'l' in flags
            archiver.addfile(os.path.join(prefix, self._path, name),
                             mode, symlink, self.filedata(name))
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'), total=total)
        ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''
        pass

    def forget(self, ui, match, prefix):
        """forget matched files; returns a pair of (empty) lists"""
        return ([], [])

    def revert(self, ui, substate, *pats, **opts):
        """revert the subrepo to substate; unsupported by default"""
        ui.warn('%s: reverting %s subrepos is unsupported\n' \
                % (substate[0], substate[2]))
        return []

    def shortid(self, revid):
        """return a short display form of revid; identity by default"""
        return revid
511 511
class hgsubrepo(abstractsubrepo):
    """subrepo backend for nested Mercurial repositories"""

    def __init__(self, ctx, path, state):
        # state is the (source, revision) pair recorded in the parent
        self._path = path
        self._state = state
        r = ctx._repo
        root = r.wjoin(path)
        create = False
        if not os.path.exists(os.path.join(root, '.hg')):
            create = True
            util.makedirs(root)
        self._repo = hg.repository(r.baseui, root, create=create)
        # propagate selected parent settings into the subrepo's ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self._repo.ui.setconfig(s, k, v, 'subrepo')
        self._repo.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
        self._initrepo(r, state[0], create)

    def storeclean(self, path):
        """locked wrapper around _storeclean"""
        lock = self._repo.lock()
        try:
            return self._storeclean(path)
        finally:
            lock.release()

    def _storeclean(self, path):
        """compare the current store hash against the cached one for path"""
        clean = True
        itercache = self._calcstorehash(path)
        try:
            for filehash in self._readstorehashcache(path):
                if filehash != itercache.next():
                    clean = False
                    break
        except StopIteration:
            # the cached and current pull states have a different size
            clean = False
        if clean:
            try:
                itercache.next()
                # the cached and current pull states have a different size
                clean = False
            except StopIteration:
                pass
        return clean

    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to to detect when there are changes that may
        require a push to a given remote path.'''
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        yield '# %s\n' % _expandedabspath(remotepath)
        for relname in filelist:
            absname = os.path.normpath(self._repo.join(relname))
            yield '%s = %s\n' % (relname, _calcfilehash(absname))

    def _getstorehashcachepath(self, remotepath):
        '''get a unique path for the store hash cache'''
        return self._repo.join(os.path.join(
            'cache', 'storehash', _getstorehashcachename(remotepath)))

    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = self._getstorehashcachepath(remotepath)
        if not os.path.exists(cachefile):
            return ''
        fd = open(cachefile, 'r')
        try:
            pullstate = fd.readlines()
        finally:
            fd.close()
        return pullstate

    def _cachestorehash(self, remotepath):
        '''cache the current store hash

        Each remote repo requires its own store hash cache, because a subrepo
        store may be "clean" versus a given remote repo, but not versus another
        '''
        cachefile = self._getstorehashcachepath(remotepath)
        lock = self._repo.lock()
        try:
            storehash = list(self._calcstorehash(remotepath))
            cachedir = os.path.dirname(cachefile)
            if not os.path.exists(cachedir):
                util.makedirs(cachedir, notindexed=True)
            fd = open(cachefile, 'w')
            try:
                fd.writelines(storehash)
            finally:
                fd.close()
        finally:
            lock.release()

    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        """link the subrepo to its parent and, on create, seed its hgrc"""
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            lines = ['[paths]\n']

            def addpathconfig(key, value):
                if value:
                    lines.append('%s = %s\n' % (key, value))
                    self._repo.ui.setconfig('paths', key, value, 'subrepo')

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)

            fp = self._repo.opener("hgrc", "w", text=True)
            try:
                fp.write(''.join(lines))
            finally:
                fp.close()

    @annotatesubrepoerror
    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        """add matched files in the subrepo via cmdutil.add"""
        return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
                           os.path.join(prefix, self._path), explicitonly)

    @annotatesubrepoerror
    def cat(self, ui, match, prefix, **opts):
        """dump matched files at the recorded revision"""
        rev = self._state[1]
        ctx = self._repo[rev]
        return cmdutil.cat(ui, self._repo, ctx, match, prefix, **opts)

    @annotatesubrepoerror
    def status(self, rev2, **opts):
        """status between the recorded revision and rev2"""
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError, inst:
            self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                               % (inst, subrelpath(self)))
            # degrade to an empty status rather than aborting the parent
            return scmutil.status([], [], [], [], [], [], [])

    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        """diff between the recorded revision and node2"""
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            cmdutil.diffordiffstat(ui, self._repo, diffopts,
                                   node1, node2, match,
                                   prefix=posixpath.join(prefix, self._path),
                                   listsubrepos=True, **opts)
        except error.RepoLookupError, inst:
            self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                               % (inst, subrelpath(self)))

    @annotatesubrepoerror
    def archive(self, ui, archiver, prefix, match=None):
        """archive this subrepo and, recursively, its own subrepos"""
        self._get(self._state + ('hg',))
        total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
        rev = self._state[1]
        ctx = self._repo[rev]
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath)
            submatch = matchmod.narrowmatcher(subpath, match)
            total += s.archive(
                ui, archiver, os.path.join(prefix, self._path), submatch)
        return total

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        r = self._state[1]
        if r == '' and not ignoreupdate: # no state recorded
            return True
        w = self._repo[None]
        if r != w.p1().hex() and not ignoreupdate:
            # different version checked out
            return True
        return w.dirty() # working directory changed

    def basestate(self):
        """hex of the working directory's first parent"""
        return self._repo['.'].hex()

    def checknested(self, path):
        return self._repo._checknested(self._repo.wjoin(path))

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)

    @annotatesubrepoerror
    def phase(self, state):
        return self._repo[state].phase()

    @annotatesubrepoerror
    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        """make the revision in state available locally

        Clones the subrepo on first use, pulls otherwise; returns True if
        the revision was already present.
        """
        source, revision, kind = state
        if revision in self._repo.unfiltered():
            return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            # empty repo: replace it with a fresh clone of the source
            self._repo.ui.status(_('cloning subrepo %s from %s\n')
                                 % (subrelpath(self), srcurl))
            parentrepo = self._repo._subparent
            shutil.rmtree(self._repo.path)
            other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                     other, self._repo.root,
                                     update=False)
            self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self._repo.ui.status(_('pulling subrepo %s from %s\n')
                                 % (subrelpath(self), srcurl))
            cleansub = self.storeclean(srcurl)
            exchange.pull(self._repo, other)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """update the subrepo working directory to state's revision"""
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            # the revision may be filtered; warn and use the unfiltered
            # view when it is hidden
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                urepo.ui.warn(
                    _('revision %s in subrepo %s is hidden\n') \
                    % (revision[0:12], self._path))
                repo = urepo
        hg.updaterepo(repo, revision, overwrite)

    @annotatesubrepoerror
    def merge(self, state):
        """merge the subrepo working directory with state's revision"""
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            # fast-forward update when possible, real merge otherwise
            if anc == cur and dst.branch() == cur.branch():
                self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()

    @annotatesubrepoerror
    def push(self, opts):
        """push the subrepo (and its subrepos) to its push path"""
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                self._repo.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self._repo.ui.status(_('pushing subrepo %s to %s\n') %
                             (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = exchange.push(self._repo, other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res.cgresult

    @annotatesubrepoerror
    def outgoing(self, ui, dest, opts):
        return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)

    @annotatesubrepoerror
    def incoming(self, ui, source, opts):
        return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)

    @annotatesubrepoerror
    def files(self):
        """manifest of the recorded revision"""
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.manifest()

    def filedata(self, name):
        rev = self._state[1]
        return self._repo[rev][name].data()

    def fileflags(self, name):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.flags(name)

    def walk(self, match):
        ctx = self._repo[None]
        return ctx.walk(match)

    @annotatesubrepoerror
    def forget(self, ui, match, prefix):
        return cmdutil.forget(ui, self._repo, match,
                              os.path.join(prefix, self._path), True)

    @annotatesubrepoerror
    def revert(self, ui, substate, *pats, **opts):
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            pats = []
            if not opts.get('all'):
                pats = ['set:modified()']
            self.filerevert(ui, *pats, **opts)

        # Update the repo to the revision specified in the given substate
        self.get(substate, overwrite=True)

    def filerevert(self, ui, *pats, **opts):
        """revert files in the subrepo to opts['rev']"""
        ctx = self._repo[opts['rev']]
        parents = self._repo.dirstate.parents()
        if opts.get('all'):
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(ui, self._repo, ctx, parents, *pats, **opts)

    def shortid(self, revid):
        """first 12 hex digits of revid"""
        return revid[:12]
891 891
class svnsubrepo(abstractsubrepo):
    '''Subrepository backed by a Subversion working copy.

    Every operation shells out to the 'svn' command-line client and
    parses its output (XML where available); no Subversion language
    bindings are required.
    '''
    def __init__(self, ctx, path, state):
        # path is relative to the parent repo root; state is the
        # (source URL, revision) pair recorded for this subrepo.
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui
        self._exe = util.findexe('svn')
        if not self._exe:
            raise util.Abort(_("'svn' executable not found for subrepo '%s'")
                             % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        '''Run the svn client with the given subcommand and arguments.

        Returns (stdout, stderr).  Unless filename is None, it is
        joined to the subrepo path and appended as the last argument.
        With failok=False, a nonzero exit status raises util.Abort and
        any stderr output is warned about; with failok=True both are
        simply returned to the caller.
        '''
        cmd = [self._exe]
        extrakw = {}
        if not self._ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = os.path.join(self._ctx._repo.origroot, self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise util.Abort(stderr or 'exited with code %d' % p.returncode)
            if stderr:
                self._ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        # (major, minor) version of the svn client, computed once.
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(r'^(\d+)\.(\d+)', output)
        if not m:
            raise util.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        # Last committed revision of the working copy.
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        # A change inside an external entry counts as an external change.
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    def dirty(self, ignoreupdate=False):
        '''True if the working copy has local changes, or (unless
        ignoreupdate) is checked out at a different revision than the
        one recorded in the subrepo state.'''
        if not self._wcchanged()[0]:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        '''Return the revision the working copy is based on.'''
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        '''Commit local changes and return the new revision number.'''
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise util.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise util.Abort(_('failed to commit svn changes'))
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self._ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        '''Delete the working copy, unless it has local changes.'''
        if self.dirty():
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        self._ui.note(_('removing subrepo %s\n') % self._path)

        def onerror(function, path, excinfo):
            if function is not os.remove:
                raise
            # read-only files cannot be unlinked under Windows
            s = os.stat(path)
            if (s.st_mode & stat.S_IWRITE) != 0:
                raise
            os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
            os.remove(path)

        path = self._ctx._repo.wjoin(self._path)
        shutil.rmtree(path, onerror=onerror)
        try:
            # Clean up now-empty parent directories, best effort.
            os.removedirs(os.path.dirname(path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        '''Check out the revision given by state, optionally reverting
        local changes first when overwrite is set.'''
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        _sanitize(self._ui, self._ctx._repo.wjoin(self._path), '.svn')
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise util.Abort((status or err).splitlines()[-1])
        self._ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        '''Merge by updating to the given state, prompting when the
        working copy has divergent local changes.'''
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self._ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        '''List all versioned files of the working copy, recursively.'''
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name):
        '''Return the contents of the named versioned file.'''
        return self._svncommand(['cat'], name)[0]
1120 1120
1121 1121
1122 1122 class gitsubrepo(abstractsubrepo):
1123 1123 def __init__(self, ctx, path, state):
1124 1124 self._state = state
1125 1125 self._ctx = ctx
1126 1126 self._path = path
1127 1127 self._relpath = os.path.join(reporelpath(ctx._repo), path)
1128 1128 self._abspath = ctx._repo.wjoin(path)
1129 1129 self._subparent = ctx._repo
1130 1130 self._ui = ctx._repo.ui
1131 1131 self._ensuregit()
1132 1132
1133 1133 def _ensuregit(self):
1134 1134 try:
1135 1135 self._gitexecutable = 'git'
1136 1136 out, err = self._gitnodir(['--version'])
1137 1137 except OSError, e:
1138 1138 if e.errno != 2 or os.name != 'nt':
1139 1139 raise
1140 1140 self._gitexecutable = 'git.cmd'
1141 1141 out, err = self._gitnodir(['--version'])
1142 1142 versionstatus = self._checkversion(out)
1143 1143 if versionstatus == 'unknown':
1144 1144 self._ui.warn(_('cannot retrieve git version\n'))
1145 1145 elif versionstatus == 'abort':
1146 1146 raise util.Abort(_('git subrepo requires at least 1.6.0 or later'))
1147 1147 elif versionstatus == 'warning':
1148 1148 self._ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1149 1149
1150 1150 @staticmethod
1151 1151 def _checkversion(out):
1152 1152 '''ensure git version is new enough
1153 1153
1154 1154 >>> _checkversion = gitsubrepo._checkversion
1155 1155 >>> _checkversion('git version 1.6.0')
1156 1156 'ok'
1157 1157 >>> _checkversion('git version 1.8.5')
1158 1158 'ok'
1159 1159 >>> _checkversion('git version 1.4.0')
1160 1160 'abort'
1161 1161 >>> _checkversion('git version 1.5.0')
1162 1162 'warning'
1163 1163 >>> _checkversion('git version 1.9-rc0')
1164 1164 'ok'
1165 1165 >>> _checkversion('git version 1.9.0.265.g81cdec2')
1166 1166 'ok'
1167 1167 >>> _checkversion('git version 1.9.0.GIT')
1168 1168 'ok'
1169 1169 >>> _checkversion('git version 12345')
1170 1170 'unknown'
1171 1171 >>> _checkversion('no')
1172 1172 'unknown'
1173 1173 '''
1174 1174 m = re.search(r'^git version (\d+)\.(\d+)', out)
1175 1175 if not m:
1176 1176 return 'unknown'
1177 1177 version = (int(m.group(1)), int(m.group(2)))
1178 1178 # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
1179 1179 # despite the docstring comment. For now, error on 1.4.0, warn on
1180 1180 # 1.5.0 but attempt to continue.
1181 1181 if version < (1, 5):
1182 1182 return 'abort'
1183 1183 elif version < (1, 6):
1184 1184 return 'warning'
1185 1185 return 'ok'
1186 1186
1187 1187 def _gitcommand(self, commands, env=None, stream=False):
1188 1188 return self._gitdir(commands, env=env, stream=stream)[0]
1189 1189
1190 1190 def _gitdir(self, commands, env=None, stream=False):
1191 1191 return self._gitnodir(commands, env=env, stream=stream,
1192 1192 cwd=self._abspath)
1193 1193
1194 1194 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
1195 1195 """Calls the git command
1196 1196
1197 1197 The methods tries to call the git command. versions prior to 1.6.0
1198 1198 are not supported and very probably fail.
1199 1199 """
1200 1200 self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
1201 1201 # unless ui.quiet is set, print git's stderr,
1202 1202 # which is mostly progress and useful info
1203 1203 errpipe = None
1204 1204 if self._ui.quiet:
1205 1205 errpipe = open(os.devnull, 'w')
1206 1206 p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
1207 1207 cwd=cwd, env=env, close_fds=util.closefds,
1208 1208 stdout=subprocess.PIPE, stderr=errpipe)
1209 1209 if stream:
1210 1210 return p.stdout, None
1211 1211
1212 1212 retdata = p.stdout.read().strip()
1213 1213 # wait for the child to exit to avoid race condition.
1214 1214 p.wait()
1215 1215
1216 1216 if p.returncode != 0 and p.returncode != 1:
1217 1217 # there are certain error codes that are ok
1218 1218 command = commands[0]
1219 1219 if command in ('cat-file', 'symbolic-ref'):
1220 1220 return retdata, p.returncode
1221 1221 # for all others, abort
1222 1222 raise util.Abort('git %s error %d in %s' %
1223 1223 (command, p.returncode, self._relpath))
1224 1224
1225 1225 return retdata, p.returncode
1226 1226
1227 1227 def _gitmissing(self):
1228 1228 return not os.path.exists(os.path.join(self._abspath, '.git'))
1229 1229
1230 1230 def _gitstate(self):
1231 1231 return self._gitcommand(['rev-parse', 'HEAD'])
1232 1232
1233 1233 def _gitcurrentbranch(self):
1234 1234 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1235 1235 if err:
1236 1236 current = None
1237 1237 return current
1238 1238
1239 1239 def _gitremote(self, remote):
1240 1240 out = self._gitcommand(['remote', 'show', '-n', remote])
1241 1241 line = out.split('\n')[1]
1242 1242 i = line.index('URL: ') + len('URL: ')
1243 1243 return line[i:]
1244 1244
1245 1245 def _githavelocally(self, revision):
1246 1246 out, code = self._gitdir(['cat-file', '-e', revision])
1247 1247 return code == 0
1248 1248
1249 1249 def _gitisancestor(self, r1, r2):
1250 1250 base = self._gitcommand(['merge-base', r1, r2])
1251 1251 return base == r1
1252 1252
1253 1253 def _gitisbare(self):
1254 1254 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1255 1255
1256 1256 def _gitupdatestat(self):
1257 1257 """This must be run before git diff-index.
1258 1258 diff-index only looks at changes to file stat;
1259 1259 this command looks at file contents and updates the stat."""
1260 1260 self._gitcommand(['update-index', '-q', '--refresh'])
1261 1261
1262 1262 def _gitbranchmap(self):
1263 1263 '''returns 2 things:
1264 1264 a map from git branch to revision
1265 1265 a map from revision to branches'''
1266 1266 branch2rev = {}
1267 1267 rev2branch = {}
1268 1268
1269 1269 out = self._gitcommand(['for-each-ref', '--format',
1270 1270 '%(objectname) %(refname)'])
1271 1271 for line in out.split('\n'):
1272 1272 revision, ref = line.split(' ')
1273 1273 if (not ref.startswith('refs/heads/') and
1274 1274 not ref.startswith('refs/remotes/')):
1275 1275 continue
1276 1276 if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
1277 1277 continue # ignore remote/HEAD redirects
1278 1278 branch2rev[ref] = revision
1279 1279 rev2branch.setdefault(revision, []).append(ref)
1280 1280 return branch2rev, rev2branch
1281 1281
1282 1282 def _gittracking(self, branches):
1283 1283 'return map of remote branch to local tracking branch'
1284 1284 # assumes no more than one local tracking branch for each remote
1285 1285 tracking = {}
1286 1286 for b in branches:
1287 1287 if b.startswith('refs/remotes/'):
1288 1288 continue
1289 1289 bname = b.split('/', 2)[2]
1290 1290 remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
1291 1291 if remote:
1292 1292 ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
1293 1293 tracking['refs/remotes/%s/%s' %
1294 1294 (remote, ref.split('/', 2)[2])] = b
1295 1295 return tracking
1296 1296
1297 1297 def _abssource(self, source):
1298 1298 if '://' not in source:
1299 1299 # recognize the scp syntax as an absolute source
1300 1300 colon = source.find(':')
1301 1301 if colon != -1 and '/' not in source[:colon]:
1302 1302 return source
1303 1303 self._subsource = source
1304 1304 return _abssource(self)
1305 1305
1306 1306 def _fetch(self, source, revision):
1307 1307 if self._gitmissing():
1308 1308 source = self._abssource(source)
1309 1309 self._ui.status(_('cloning subrepo %s from %s\n') %
1310 1310 (self._relpath, source))
1311 1311 self._gitnodir(['clone', source, self._abspath])
1312 1312 if self._githavelocally(revision):
1313 1313 return
1314 1314 self._ui.status(_('pulling subrepo %s from %s\n') %
1315 1315 (self._relpath, self._gitremote('origin')))
1316 1316 # try only origin: the originally cloned repo
1317 1317 self._gitcommand(['fetch'])
1318 1318 if not self._githavelocally(revision):
1319 1319 raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
1320 1320 (revision, self._relpath))
1321 1321
1322 1322 @annotatesubrepoerror
1323 1323 def dirty(self, ignoreupdate=False):
1324 1324 if self._gitmissing():
1325 1325 return self._state[1] != ''
1326 1326 if self._gitisbare():
1327 1327 return True
1328 1328 if not ignoreupdate and self._state[1] != self._gitstate():
1329 1329 # different version checked out
1330 1330 return True
1331 1331 # check for staged changes or modified files; ignore untracked files
1332 1332 self._gitupdatestat()
1333 1333 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1334 1334 return code == 1
1335 1335
1336 1336 def basestate(self):
1337 1337 return self._gitstate()
1338 1338
1339 1339 @annotatesubrepoerror
1340 1340 def get(self, state, overwrite=False):
1341 1341 source, revision, kind = state
1342 1342 if not revision:
1343 1343 self.remove()
1344 1344 return
1345 1345 self._fetch(source, revision)
1346 1346 # if the repo was set to be bare, unbare it
1347 1347 if self._gitisbare():
1348 1348 self._gitcommand(['config', 'core.bare', 'false'])
1349 1349 if self._gitstate() == revision:
1350 1350 self._gitcommand(['reset', '--hard', 'HEAD'])
1351 1351 return
1352 1352 elif self._gitstate() == revision:
1353 1353 if overwrite:
1354 1354 # first reset the index to unmark new files for commit, because
1355 1355 # reset --hard will otherwise throw away files added for commit,
1356 1356 # not just unmark them.
1357 1357 self._gitcommand(['reset', 'HEAD'])
1358 1358 self._gitcommand(['reset', '--hard', 'HEAD'])
1359 1359 return
1360 1360 branch2rev, rev2branch = self._gitbranchmap()
1361 1361
1362 1362 def checkout(args):
1363 1363 cmd = ['checkout']
1364 1364 if overwrite:
1365 1365 # first reset the index to unmark new files for commit, because
1366 1366 # the -f option will otherwise throw away files added for
1367 1367 # commit, not just unmark them.
1368 1368 self._gitcommand(['reset', 'HEAD'])
1369 1369 cmd.append('-f')
1370 1370 self._gitcommand(cmd + args)
1371 1371 _sanitize(self._ui, self._abspath, '.git')
1372 1372
1373 1373 def rawcheckout():
1374 1374 # no branch to checkout, check it out with no branch
1375 1375 self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
1376 1376 self._relpath)
1377 1377 self._ui.warn(_('check out a git branch if you intend '
1378 1378 'to make changes\n'))
1379 1379 checkout(['-q', revision])
1380 1380
1381 1381 if revision not in rev2branch:
1382 1382 rawcheckout()
1383 1383 return
1384 1384 branches = rev2branch[revision]
1385 1385 firstlocalbranch = None
1386 1386 for b in branches:
1387 1387 if b == 'refs/heads/master':
1388 1388 # master trumps all other branches
1389 1389 checkout(['refs/heads/master'])
1390 1390 return
1391 1391 if not firstlocalbranch and not b.startswith('refs/remotes/'):
1392 1392 firstlocalbranch = b
1393 1393 if firstlocalbranch:
1394 1394 checkout([firstlocalbranch])
1395 1395 return
1396 1396
1397 1397 tracking = self._gittracking(branch2rev.keys())
1398 1398 # choose a remote branch already tracked if possible
1399 1399 remote = branches[0]
1400 1400 if remote not in tracking:
1401 1401 for b in branches:
1402 1402 if b in tracking:
1403 1403 remote = b
1404 1404 break
1405 1405
1406 1406 if remote not in tracking:
1407 1407 # create a new local tracking branch
1408 1408 local = remote.split('/', 3)[3]
1409 1409 checkout(['-b', local, remote])
1410 1410 elif self._gitisancestor(branch2rev[tracking[remote]], remote):
1411 1411 # When updating to a tracked remote branch,
1412 1412 # if the local tracking branch is downstream of it,
1413 1413 # a normal `git pull` would have performed a "fast-forward merge"
1414 1414 # which is equivalent to updating the local branch to the remote.
1415 1415 # Since we are only looking at branching at update, we need to
1416 1416 # detect this situation and perform this action lazily.
1417 1417 if tracking[remote] != self._gitcurrentbranch():
1418 1418 checkout([tracking[remote]])
1419 1419 self._gitcommand(['merge', '--ff', remote])
1420 1420 _sanitize(self._ui, self._abspath, '.git')
1421 1421 else:
1422 1422 # a real merge would be required, just checkout the revision
1423 1423 rawcheckout()
1424 1424
1425 1425 @annotatesubrepoerror
1426 1426 def commit(self, text, user, date):
1427 1427 if self._gitmissing():
1428 1428 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1429 1429 cmd = ['commit', '-a', '-m', text]
1430 1430 env = os.environ.copy()
1431 1431 if user:
1432 1432 cmd += ['--author', user]
1433 1433 if date:
1434 1434 # git's date parser silently ignores when seconds < 1e9
1435 1435 # convert to ISO8601
1436 1436 env['GIT_AUTHOR_DATE'] = util.datestr(date,
1437 1437 '%Y-%m-%dT%H:%M:%S %1%2')
1438 1438 self._gitcommand(cmd, env=env)
1439 1439 # make sure commit works otherwise HEAD might not exist under certain
1440 1440 # circumstances
1441 1441 return self._gitstate()
1442 1442
1443 1443 @annotatesubrepoerror
1444 1444 def merge(self, state):
1445 1445 source, revision, kind = state
1446 1446 self._fetch(source, revision)
1447 1447 base = self._gitcommand(['merge-base', revision, self._state[1]])
1448 1448 self._gitupdatestat()
1449 1449 out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
1450 1450
1451 1451 def mergefunc():
1452 1452 if base == revision:
1453 1453 self.get(state) # fast forward merge
1454 1454 elif base != self._state[1]:
1455 1455 self._gitcommand(['merge', '--no-commit', revision])
1456 1456 _sanitize(self._ui, self._abspath, '.git')
1457 1457
1458 1458 if self.dirty():
1459 1459 if self._gitstate() != revision:
1460 1460 dirty = self._gitstate() == self._state[1] or code != 0
1461 1461 if _updateprompt(self._ui, self, dirty,
1462 1462 self._state[1][:7], revision[:7]):
1463 1463 mergefunc()
1464 1464 else:
1465 1465 mergefunc()
1466 1466
1467 1467 @annotatesubrepoerror
1468 1468 def push(self, opts):
1469 1469 force = opts.get('force')
1470 1470
1471 1471 if not self._state[1]:
1472 1472 return True
1473 1473 if self._gitmissing():
1474 1474 raise util.Abort(_("subrepo %s is missing") % self._relpath)
1475 1475 # if a branch in origin contains the revision, nothing to do
1476 1476 branch2rev, rev2branch = self._gitbranchmap()
1477 1477 if self._state[1] in rev2branch:
1478 1478 for b in rev2branch[self._state[1]]:
1479 1479 if b.startswith('refs/remotes/origin/'):
1480 1480 return True
1481 1481 for b, revision in branch2rev.iteritems():
1482 1482 if b.startswith('refs/remotes/origin/'):
1483 1483 if self._gitisancestor(self._state[1], revision):
1484 1484 return True
1485 1485 # otherwise, try to push the currently checked out branch
1486 1486 cmd = ['push']
1487 1487 if force:
1488 1488 cmd.append('--force')
1489 1489
1490 1490 current = self._gitcurrentbranch()
1491 1491 if current:
1492 1492 # determine if the current branch is even useful
1493 1493 if not self._gitisancestor(self._state[1], current):
1494 1494 self._ui.warn(_('unrelated git branch checked out '
1495 1495 'in subrepo %s\n') % self._relpath)
1496 1496 return False
1497 1497 self._ui.status(_('pushing branch %s of subrepo %s\n') %
1498 1498 (current.split('/', 2)[2], self._relpath))
1499 1499 ret = self._gitdir(cmd + ['origin', current])
1500 1500 return ret[1] == 0
1501 1501 else:
1502 1502 self._ui.warn(_('no branch checked out in subrepo %s\n'
1503 1503 'cannot push revision %s\n') %
1504 1504 (self._relpath, self._state[1]))
1505 1505 return False
1506 1506
1507 1507 @annotatesubrepoerror
1508 1508 def remove(self):
1509 1509 if self._gitmissing():
1510 1510 return
1511 1511 if self.dirty():
1512 1512 self._ui.warn(_('not removing repo %s because '
1513 1513 'it has changes.\n') % self._relpath)
1514 1514 return
1515 1515 # we can't fully delete the repository as it may contain
1516 1516 # local-only history
1517 1517 self._ui.note(_('removing subrepo %s\n') % self._relpath)
1518 1518 self._gitcommand(['config', 'core.bare', 'true'])
1519 1519 for f in os.listdir(self._abspath):
1520 1520 if f == '.git':
1521 1521 continue
1522 1522 path = os.path.join(self._abspath, f)
1523 1523 if os.path.isdir(path) and not os.path.islink(path):
1524 1524 shutil.rmtree(path)
1525 1525 else:
1526 1526 os.remove(path)
1527 1527
1528 1528 def archive(self, ui, archiver, prefix, match=None):
1529 1529 total = 0
1530 1530 source, revision = self._state
1531 1531 if not revision:
1532 1532 return total
1533 1533 self._fetch(source, revision)
1534 1534
1535 1535 # Parse git's native archive command.
1536 1536 # This should be much faster than manually traversing the trees
1537 1537 # and objects with many subprocess calls.
1538 1538 tarstream = self._gitcommand(['archive', revision], stream=True)
1539 1539 tar = tarfile.open(fileobj=tarstream, mode='r|')
1540 1540 relpath = subrelpath(self)
1541 1541 ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1542 1542 for i, info in enumerate(tar):
1543 1543 if info.isdir():
1544 1544 continue
1545 1545 if match and not match(info.name):
1546 1546 continue
1547 1547 if info.issym():
1548 1548 data = info.linkname
1549 1549 else:
1550 1550 data = tar.extractfile(info).read()
1551 1551 archiver.addfile(os.path.join(prefix, self._path, info.name),
1552 1552 info.mode, info.issym(), data)
1553 1553 total += 1
1554 1554 ui.progress(_('archiving (%s)') % relpath, i + 1,
1555 1555 unit=_('files'))
1556 1556 ui.progress(_('archiving (%s)') % relpath, None)
1557 1557 return total
1558 1558
1559 1559
1560 1560 @annotatesubrepoerror
1561 1561 def status(self, rev2, **opts):
1562 1562 rev1 = self._state[1]
1563 1563 if self._gitmissing() or not rev1:
1564 1564 # if the repo is missing, return no results
1565 1565 return [], [], [], [], [], [], []
1566 1566 modified, added, removed = [], [], []
1567 1567 self._gitupdatestat()
1568 1568 if rev2:
1569 1569 command = ['diff-tree', rev1, rev2]
1570 1570 else:
1571 1571 command = ['diff-index', rev1]
1572 1572 out = self._gitcommand(command)
1573 1573 for line in out.split('\n'):
1574 1574 tab = line.find('\t')
1575 1575 if tab == -1:
1576 1576 continue
1577 1577 status, f = line[tab - 1], line[tab + 1:]
1578 1578 if status == 'M':
1579 1579 modified.append(f)
1580 1580 elif status == 'A':
1581 1581 added.append(f)
1582 1582 elif status == 'D':
1583 1583 removed.append(f)
1584 1584
1585 1585 deleted = unknown = ignored = clean = []
1586 return modified, added, removed, deleted, unknown, ignored, clean
1586 return scmutil.status(modified, added, removed, deleted,
1587 unknown, ignored, clean)
1587 1588
1588 1589 def shortid(self, revid):
1589 1590 return revid[:7]
1590 1591
# Map of subrepo kind (the third element of a subrepo state tuple, as
# recorded in .hgsubstate) to the class implementing that subrepo type.
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
    }
@@ -1,13 +1,13 b''
1 1 workingfilectx.date = (1000, 0)
2 2 ASCII : Gr?ezi!
3 3 Latin-1 : Gr�ezi!
4 4 UTF-8 : Grüezi!
5 (['foo'], [], [], [], [], [], [])
5 <status modified=['foo'], added=[], removed=[], deleted=[], unknown=[], ignored=[], clean=[]>
6 6 diff --git a/foo b/foo
7 7
8 8 --- a/foo
9 9 +++ b/foo
10 10 @@ -1,1 +1,2 @@
11 11 foo
12 12 +bar
13 13
General Comments 0
You need to be logged in to leave comments. Login now