##// END OF EJS Templates
# Changeset metadata (from hgweb view):
#   match: rename _fmap to _fileroots for clarity
#   Author: Drew Gottlieb
#   Revision: r25189:1c8c33ea (default branch)
#   Diff range: @@ -1,1372 +1,1372
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''

import os
import copy

from mercurial import hg, util, cmdutil, scmutil, match as match_, \
        archival, pathutil, revset
from mercurial.i18n import _

import lfutil
import lfcommands
import basestore

# -- Utility functions: commonly/repeatedly needed functionality ---------------

def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = filter(lfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    # a file matches only if its standin is tracked AND the original
    # matcher would have accepted it
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m
35 35
def composenormalfilematcher(match, manifest, exclude=None):
    '''create a matcher that matches only the normal (non-large) files in
    the original matcher, optionally excluding names listed in ``exclude``'''
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                              manifest or f in excluded)
    m._files = filter(notlfile, m._files)
    m._fileroots = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m
50 50
def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=[], opts={}, globbed=False,
                      default='relpath'):
        match = oldmatch(ctx, pats, opts, globbed, default)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)
58 58
def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    oldmatch = scmutil.match
    # remember the previous function on the override so restorematchfn()
    # can unwind one level of patching
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch
66 66
def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    scmutil.match = getattr(scmutil.match, 'oldmatch')
74 74
def installmatchandpatsfn(f):
    '''monkey patch scmutil.matchandpats with ``f``; returns the previous
    function so it can later be restored (see restorematchandpatsfn)'''
    oldmatchandpats = scmutil.matchandpats
    setattr(f, 'oldmatchandpats', oldmatchandpats)
    scmutil.matchandpats = f
    return oldmatchandpats
80 80
def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
            scmutil.matchandpats)
90 90
def addlargefiles(ui, repo, isaddremove, matcher, **opts):
    '''add files matched by ``matcher`` as largefiles; returns the pair
    (added, bad) of file lists'''
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = copy.copy(matcher)
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # addremove in core gets fancy with the name, add doesn't
        if isaddremove:
            name = m.uipath(f)
        else:
            name = m.rel(f)

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % name)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (lfsize and
                        repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % name)

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

        added = [f for f in lfnames if f not in bad]
    finally:
        wlock.release()
    return added, bad
165 165
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    '''remove largefiles matched by ``matcher``; returns nonzero if any
    file could not be removed (a warning was issued)'''
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        # warn about each file; returns 1 if anything was warned about
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)

        if opts.get('dry_run'):
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()
    finally:
        wlock.release()

    return result
236 236
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    '''map a standin path back to the largefile name; pass through others'''
    return lfutil.splitstandin(path) or path
241 241
# -- Wrappers: modify existing commands --------------------------------

def overrideadd(orig, ui, repo, *pats, **opts):
    '''wrap 'hg add' to reject the contradictory --normal/--large combo'''
    if opts.get('normal') and opts.get('large'):
        raise util.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
248 248
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    '''wrap cmdutil.add: add largefiles first, then normal files, and
    return the combined list of files that could not be added'''
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    # exclude the files just added as largefiles from the normal add
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(f for f in lbad)
    return bad
261 261
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    '''wrap cmdutil.remove: remove normal files first, then largefiles'''
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    return removelargefiles(ui, repo, False, matcher, after=after,
                            force=force) or result
267 267
def overridestatusfn(orig, repo, rev2, **opts):
    '''run subrepo status with largefile awareness enabled'''
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
274 274
def overridestatus(orig, ui, repo, *pats, **opts):
    '''run 'hg status' with largefile awareness enabled'''
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
281 281
def overridedirty(orig, repo, ignoreupdate=False):
    '''check subrepo dirtiness with largefile awareness enabled'''
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
288 288
def overridelog(orig, ui, repo, *pats, **opts):
    '''wrap 'hg log' so that both largefiles and their standins match'''
    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            if pat.startswith('set:'):
                return pat

            kindpat = match_._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it.  This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin.  Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)

            pats.update(fixpats(f, tostandin) for f in p)
        else:
            def tostandin(f):
                if lfutil.splitstandin(f):
                    return f
                return lfutil.standin(f)
            pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles.  The original directory is kept to support normal
            # files.
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()] \
                    and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileroots = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # also accept a standin whose stripped name would have matched
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % sorted(pats))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
394 394
def overrideverify(orig, ui, repo, *pats, **opts):
    '''wrap 'hg verify' to optionally also verify largefiles'''
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result
404 404
def overridedebugstate(orig, ui, repo, *pats, **opts):
    '''wrap 'hg debugstate' to optionally dump the largefiles dirstate'''
    large = opts.pop('large', False)
    if large:
        class fakerepo(object):
            # stand-in repo exposing only the lfdirstate to orig()
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)
413 413
# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
428 428
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
        followcopies)

    if overwrite:
        return actions, diverge, renamedelete

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in actions:
        splitstandin = f and lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in lfiles:
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete
517 517
def mergerecordupdates(orig, repo, actions, branchmerge):
    '''process the largefiles-specific 'lfmr' actions before recording'''
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)
530 530
531 531
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)

    # standins contain the largefile hash; compare hashes, not contents
    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
               (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0
552 552
def copiespathcopies(orig, ctx1, ctx2, match=None):
    '''wrap copies.pathcopies, translating standin names on both sides of
    each copy record back to largefile names'''
    copies = orig(ctx1, ctx2, match=match)
    updated = {}

    for k, v in copies.iteritems():
        updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v

    return updated
561 561
562 562 # Copy first changes the matchers to match standins instead of
563 563 # largefiles. Then it overrides util.copyfile in that function it
564 564 # checks if the destination largefile already exists. It also keeps a
565 565 # list of copied files so that the largefiles can be copied and the
566 566 # dirstate updated.
567 567 def overridecopy(orig, ui, repo, pats, opts, rename=False):
568 568 # doesn't remove largefile on rename
569 569 if len(pats) < 2:
570 570 # this isn't legal, let the original function deal with it
571 571 return orig(ui, repo, pats, opts, rename)
572 572
573 573 # This could copy both lfiles and normal files in one command,
574 574 # but we don't want to do that. First replace their matcher to
575 575 # only match normal files and run it, then replace it to just
576 576 # match largefiles and run it again.
577 577 nonormalfiles = False
578 578 nolfiles = False
579 579 installnormalfilesmatchfn(repo[None].manifest())
580 580 try:
581 581 result = orig(ui, repo, pats, opts, rename)
582 582 except util.Abort, e:
583 583 if str(e) != _('no files to copy'):
584 584 raise e
585 585 else:
586 586 nonormalfiles = True
587 587 result = 0
588 588 finally:
589 589 restorematchfn()
590 590
591 591 # The first rename can cause our current working directory to be removed.
592 592 # In that case there is nothing left to copy/rename so just quit.
593 593 try:
594 594 repo.getcwd()
595 595 except OSError:
596 596 return result
597 597
598 598 def makestandin(relpath):
599 599 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
600 600 return os.path.join(repo.wjoin(lfutil.standin(path)))
601 601
602 602 fullpats = scmutil.expandpats(pats)
603 603 dest = fullpats[-1]
604 604
605 605 if os.path.isdir(dest):
606 606 if not os.path.isdir(makestandin(dest)):
607 607 os.makedirs(makestandin(dest))
608 608
609 609 try:
610 610 # When we call orig below it creates the standins but we don't add
611 611 # them to the dir state until later so lock during that time.
612 612 wlock = repo.wlock()
613 613
614 614 manifest = repo[None].manifest()
615 615 def overridematch(ctx, pats=[], opts={}, globbed=False,
616 616 default='relpath'):
617 617 newpats = []
618 618 # The patterns were previously mangled to add the standin
619 619 # directory; we need to remove that now
620 620 for pat in pats:
621 621 if match_.patkind(pat) is None and lfutil.shortname in pat:
622 622 newpats.append(pat.replace(lfutil.shortname, ''))
623 623 else:
624 624 newpats.append(pat)
625 625 match = oldmatch(ctx, newpats, opts, globbed, default)
626 626 m = copy.copy(match)
627 627 lfile = lambda f: lfutil.standin(f) in manifest
628 628 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
629 m._fmap = set(m._files)
629 m._fileroots = set(m._files)
630 630 origmatchfn = m.matchfn
631 631 m.matchfn = lambda f: (lfutil.isstandin(f) and
632 632 (f in manifest) and
633 633 origmatchfn(lfutil.splitstandin(f)) or
634 634 None)
635 635 return m
636 636 oldmatch = installmatchfn(overridematch)
637 637 listpats = []
638 638 for pat in pats:
639 639 if match_.patkind(pat) is not None:
640 640 listpats.append(pat)
641 641 else:
642 642 listpats.append(makestandin(pat))
643 643
644 644 try:
645 645 origcopyfile = util.copyfile
646 646 copiedfiles = []
647 647 def overridecopyfile(src, dest):
648 648 if (lfutil.shortname in src and
649 649 dest.startswith(repo.wjoin(lfutil.shortname))):
650 650 destlfile = dest.replace(lfutil.shortname, '')
651 651 if not opts['force'] and os.path.exists(destlfile):
652 652 raise IOError('',
653 653 _('destination largefile already exists'))
654 654 copiedfiles.append((src, dest))
655 655 origcopyfile(src, dest)
656 656
657 657 util.copyfile = overridecopyfile
658 658 result += orig(ui, repo, listpats, opts, rename)
659 659 finally:
660 660 util.copyfile = origcopyfile
661 661
662 662 lfdirstate = lfutil.openlfdirstate(ui, repo)
663 663 for (src, dest) in copiedfiles:
664 664 if (lfutil.shortname in src and
665 665 dest.startswith(repo.wjoin(lfutil.shortname))):
666 666 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
667 667 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
668 668 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
669 669 if not os.path.isdir(destlfiledir):
670 670 os.makedirs(destlfiledir)
671 671 if rename:
672 672 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
673 673
674 674 # The file is gone, but this deletes any empty parent
675 675 # directories as a side-effect.
676 676 util.unlinkpath(repo.wjoin(srclfile), True)
677 677 lfdirstate.remove(srclfile)
678 678 else:
679 679 util.copyfile(repo.wjoin(srclfile),
680 680 repo.wjoin(destlfile))
681 681
682 682 lfdirstate.add(destlfile)
683 683 lfdirstate.write()
684 684 except util.Abort, e:
685 685 if str(e) != _('no files to copy'):
686 686 raise e
687 687 else:
688 688 nolfiles = True
689 689 finally:
690 690 restorematchfn()
691 691 wlock.release()
692 692
693 693 if nolfiles and nonormalfiles:
694 694 raise util.Abort(_('no files to copy'))
695 695
696 696 return result
697 697
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=[], opts={}, globbed=False,
                default='relpath'):
            match = oldmatch(mctx, pats, opts, globbed, default)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            def tostandin(f):
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    return standin
                elif standin in repo[None] or lfdirstate[f] == 'r':
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fileroots = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()
772 772
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
797 797
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise util.Abort(_("pulled() only available in --lfrev"))
    return revset.baseset([r for r in subset if r >= firstpulled])
823 823
def overrideclone(orig, ui, source, dest=None, **opts):
    '''Reject "clone --all-largefiles" to a non-local destination, since
    the largefiles cannot be downloaded there, then defer to the wrapped
    clone command.'''
    destpath = dest
    if destpath is None:
        destpath = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            destpath)

    return orig(ui, source, dest, **opts)
834 834
def hgclone(orig, ui, opts, *args, **kwargs):
    '''Wrap hg.clone: enable the largefiles extension in the new local
    clone's hgrc if the source requires it, and optionally pre-download
    all largefiles (--all-largefiles).'''
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer.   Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # If largefiles is required for this repo, permanently enable it locally
        if 'largefiles' in repo.requirements:
            fp = repo.vfs('hgrc', 'a', text=True)
            try:
                fp.write('\n[extensions]\nlargefiles=\n')
            finally:
                fp.close()

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point.  The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            # signal failure to the caller if any largefile was unavailable
            if missing != 0:
                return None

    return result
866 866
def overriderebase(orig, ui, repo, **opts):
    '''Rebase with largefiles support: push a commit hook that auto-accepts
    standin changes and a status writer that silences largefiles output
    for the duration of the rebase.'''
    if not util.safehasattr(repo, '_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        # pop what was pushed above, even if the rebase failed
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
879 879
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix='', mtime=None, subrepos=None):
    '''Archive revision *node*, writing the real largefile contents in
    place of each standin (fetching them into the local cache first).'''
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # honour the caller-provided match function, if any
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        write('.hg_archival.txt', 0644, False,
              lambda: archival.buildmetadata(ctx))

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # the standin's content is the largefile hash; map it to the
            # actual file in the store or system cache
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                # note: closes over 'path'; safe because write() is invoked
                # in the same loop iteration that rebinds it
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(archiver, prefix, submatch)

    archiver.done()
942 942
943 943 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
944 944 repo._get(repo._state + ('hg',))
945 945 rev = repo._state[1]
946 946 ctx = repo._repo[rev]
947 947
948 948 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
949 949
950 950 def write(name, mode, islink, getdata):
951 951 # At this point, the standin has been replaced with the largefile name,
952 952 # so the normal matcher works here without the lfutil variants.
953 953 if match and not match(f):
954 954 return
955 955 data = getdata()
956 956
957 957 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
958 958
959 959 for f in ctx:
960 960 ff = ctx.flags(f)
961 961 getdata = ctx[f].data
962 962 if lfutil.isstandin(f):
963 963 path = lfutil.findfile(repo._repo, getdata().strip())
964 964 if path is None:
965 965 raise util.Abort(
966 966 _('largefile %s not found in repo store or system cache')
967 967 % lfutil.splitstandin(f))
968 968 f = lfutil.splitstandin(f)
969 969
970 970 def getdatafn():
971 971 fd = None
972 972 try:
973 973 fd = open(os.path.join(prefix, path), 'rb')
974 974 return fd.read()
975 975 finally:
976 976 if fd:
977 977 fd.close()
978 978
979 979 getdata = getdatafn
980 980
981 981 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
982 982
983 983 for subpath in sorted(ctx.substate):
984 984 sub = ctx.sub(subpath)
985 985 submatch = match_.narrowmatcher(subpath, match)
986 986 sub.archive(archiver, prefix + repo._path + '/', submatch)
987 987
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    # first let the original check run (it raises on normal-file changes)
    orig(repo, *args, **kwargs)
    # then re-check status with largefiles taken into account
    repo.lfstatus = True
    s = repo.status()
    repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise util.Abort(_('uncommitted changes'))
999 999
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    '''Forget normal files via the wrapped cmdutil.forget, then forget the
    matched largefiles as well, dropping their standins.

    Returns the same (bad, forgot) pair of lists as the wrapped function,
    extended with the largefile results.'''
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    # only forget files whose standin is actually tracked
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                repo.wvfs.isdir(lfutil.standin(f)):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                # never committed: just drop the dirstate entry
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        rejected = repo[None].forget(standins)
    finally:
        wlock.release()

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1045 1045
def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on 'other' repository are ignored.

    'addfunc' is invoked with each unique pair of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        # ask the remote store for all hashes in a single batched query
        lfexists = basestore._openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]: # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)
1068 1068
def outgoinghook(ui, repo, other, opts, missing):
    '''Hook for "hg outgoing --large": list the largefiles that would be
    uploaded, including their hashes when --debug is in effect.'''
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            # debug mode: remember every hash per filename so it can be shown
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug('    %s\n' % (lfhash))
        else:
            # normal mode: only the set of filenames matters
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')
1100 1100
def summaryremotehook(ui, repo, opts, changes):
    '''Hook for "hg summary --remote --large": report how many largefile
    entities/files would be uploaded to the remote repository.

    When *changes* is None, returns a (changesets, largefiles) pair of
    booleans telling the caller which remote checks are needed.'''
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))
1129 1129
def overridesummary(orig, ui, repo, *pats, **opts):
    '''Run summary with repo.lfstatus enabled so largefile changes are
    reflected, restoring the flag afterwards.'''
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
1136 1136
def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
                     similarity=None):
    '''addremove with largefiles support: remove missing largefiles, add
    eligible new files as largefiles, then hand the remaining normal
    files to the wrapped addremove.

    NOTE(review): the mutable default for 'opts' is only read and
    forwarded here, never mutated, so it is harmless as written —
    confirm before changing the signature.'''
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                  False, False, False)

    # Call into the normal remove code, but the removing of the standin, we
    # want to have handled by original addremove.  Monkey patching here makes
    # sure we don't remove the standin in the largefiles code, preventing a
    # very confused state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted list
        # because that affects the m.exact() test, which in turn governs whether
        # or not the file name is printed, and how.  Simply limit the original
        # matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest.  Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)
1169 1169
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    '''Run purge with a temporarily patched repo.status that hides files
    the largefiles dirstate still tracks, so purge does not delete them.'''
    # XXX Monkey patching a repoview will not work. The assigned attribute will
    # be set on the unfiltered repo, but we will only lookup attributes in the
    # unfiltered repo if the lookup in the repoview object itself fails. As the
    # monkey patched method exists on the repoview class the lookup will not
    # fail. As a result, the original version will shadow the monkey patched
    # one, defeating the monkey patch.
    #
    # As a work around we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # files known to the largefiles dirstate are not really unknown/ignored
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    '''Rollback with largefiles support: after the wrapped rollback moves
    the dirstate parents, rewrite the standin files in the working
    directory and resync the largefiles dirstate to match.'''
    wlock = repo.wlock()
    try:
        before = repo.dirstate.parents()
        # standins tracked before the rollback; any that are no longer
        # tracked afterwards are orphans to be deleted
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    # restore the standin content from the new parent
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        # drop largefiles dirstate entries that no longer have a standin
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
1235 1235
def overridetransplant(orig, ui, repo, *revs, **opts):
    '''Transplant with largefiles support: push a commit hook that
    auto-accepts standin changes and a status writer that silences
    largefiles output for the duration of the transplant.'''
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        # pop what was pushed above, even if the transplant failed
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result
1246 1246
def overridecat(orig, ui, repo, file1, *pats, **opts):
    '''cat with largefiles support: when a pattern names a largefile,
    output the real largefile contents from the user cache (downloading
    into it first if necessary).

    Returns 0 if at least one file was written, 1 otherwise.'''
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        # also match a standin whose largefile name matches the pattern
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # suppress "no such file" for names resolved through a standin
        if not f in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        # always descend into the standin directory
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None or origmatchfn(f):
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            # a largefile matched via its standin: stream it from the cache
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded')  % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
1308 1308
def mergeupdate(orig, repo, node, branchmerge, force, partial,
                *args, **kwargs):
    '''Wrap merge.update to keep standins and the largefiles dirstate in
    sync across updates and merges.'''
    wlock = repo.wlock()
    try:
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        # refresh standins of modified/unsure largefiles before the update
        # so the merge machinery sees their current hashes
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(match_.always(repo.root,
                                                    repo.getcwd()),
                                      [], False, False, False)
        pctx = repo['.']
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not os.path.exists(lfileabs):
                continue
            lfhash = lfutil.hashrepofile(repo, lfile)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readstandin(repo, lfile, '.')):
                # the largefile turned out to be clean after all
                lfdirstate.normal(lfile)
        for lfile in s.added:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)

        result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)

        # update the working copy of every largefile whose standin changed
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result
    finally:
        wlock.release()
1363 1363
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    '''After the wrapped call marks *files* as touched, refresh the
    working-directory copy of every largefile whose standin is among
    them.'''
    result = orig(repo, files, *args, **kwargs)

    touched = []
    for f in files:
        if lfutil.isstandin(f):
            touched.append(lfutil.splitstandin(f))
    if touched:
        lfcommands.updatelfiles(repo.ui, repo, filelist=touched,
                                printmessage=False, normallookup=True)

    return result
@@ -1,542 +1,542
1 1 # match.py - filename matching
2 2 #
3 3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import util, pathutil
10 10 from i18n import _
11 11
12 12 propertycache = util.propertycache
13 13
def _rematcher(regex):
    '''compile the regexp with the best available regexp engine and return a
    matcher function'''
    compiled = util.re.compile(regex)
    # re2 bindings expose a slightly faster test_match; plain re does not
    tester = getattr(compiled, 'test_match', None)
    if tester is not None:
        return tester
    return compiled.match
23 23
def _expandsets(kindpats, ctx, listsubrepos):
    '''Returns the kindpats list with the 'set' patterns expanded.

    Returns a pair (fset, other): *fset* is the set of file names matched
    by the 'set' fileset expressions (evaluated against *ctx*), *other*
    is the list of remaining (kind, pat) pairs.'''
    fset = set()
    other = []

    for kind, pat in kindpats:
        if kind == 'set':
            if not ctx:
                raise util.Abort("fileset expression with no context")
            s = ctx.getfileset(pat)
            fset.update(s)

            if listsubrepos:
                for subpath in ctx.substate:
                    s = ctx.sub(subpath).getfileset(pat)
                    # qualify subrepo matches with their subrepo path
                    fset.update(subpath + '/' + f for f in s)

            continue
        other.append((kind, pat))
    return fset, other
44 44
45 45 def _kindpatsalwaysmatch(kindpats):
46 46 """"Checks whether the kindspats match everything, as e.g.
47 47 'relpath:.' does.
48 48 """
49 49 for kind, pat in kindpats:
50 50 if pat != '' or kind not in ['relpath', 'glob']:
51 51 return False
52 52 return True
53 53
class match(object):
    '''File matcher built from user-supplied patterns plus include and
    exclude lists; callable on repo-relative file names.'''

    def __init__(self, root, cwd, patterns, include=[], exclude=[],
                 default='glob', exact=False, auditor=None, ctx=None,
                 listsubrepos=False):
        """build an object to match a set of file patterns

        arguments:
        root - the canonical root of the tree you're matching against
        cwd - the current working directory, if relevant
        patterns - patterns to find
        include - patterns to include (unless they are excluded)
        exclude - patterns to exclude (even if they are included)
        default - if a pattern in patterns has no explicit type, assume this one
        exact - patterns are actually filenames (include/exclude still apply)

        a pattern is one of:
        'glob:<glob>' - a glob relative to cwd
        're:<regexp>' - a regular expression
        'path:<path>' - a path relative to repository root
        'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
        'relpath:<path>' - a path relative to cwd
        'relre:<regexp>' - a regexp that needn't match the start of a name
        'set:<fileset>' - a fileset expression
        '<something>' - a pattern of the specified default type
        """

        self._root = root
        self._cwd = cwd
        self._files = [] # exact files and roots of patterns
        self._anypats = bool(include or exclude)
        self._always = False
        self._pathrestricted = bool(include or exclude or patterns)

        # collect one predicate per active criterion; a file must satisfy
        # all of them to match
        matchfns = []
        if include:
            kindpats = self._normalize(include, 'glob', root, cwd, auditor)
            self.includepat, im = _buildmatch(ctx, kindpats, '(?:/|$)',
                                              listsubrepos)
            matchfns.append(im)
        if exclude:
            kindpats = self._normalize(exclude, 'glob', root, cwd, auditor)
            self.excludepat, em = _buildmatch(ctx, kindpats, '(?:/|$)',
                                              listsubrepos)
            matchfns.append(lambda f: not em(f))
        if exact:
            if isinstance(patterns, list):
                self._files = patterns
            else:
                self._files = list(patterns)
            matchfns.append(self.exact)
        elif patterns:
            kindpats = self._normalize(patterns, default, root, cwd, auditor)
            if not _kindpatsalwaysmatch(kindpats):
                self._files = _roots(kindpats)
                self._anypats = self._anypats or _anypats(kindpats)
                self.patternspat, pm = _buildmatch(ctx, kindpats, '$',
                                                   listsubrepos)
                matchfns.append(pm)

        if not matchfns:
            m = util.always
            self._always = True
        elif len(matchfns) == 1:
            m = matchfns[0]
        else:
            def m(f):
                for matchfn in matchfns:
                    if not matchfn(f):
                        return False
                return True

        self.matchfn = m
        self._fileroots = set(self._files)

    def __call__(self, fn):
        # calling the matcher is the same as calling its matchfn
        return self.matchfn(fn)
    def __iter__(self):
        # iterate over the explicitly listed files/roots
        for f in self._files:
            yield f

    # Callbacks related to how the matcher is used by dirstate.walk.
    # Subscribers to these events must monkeypatch the matcher object.
    def bad(self, f, msg):
        '''Callback from dirstate.walk for each explicit file that can't be
        found/accessed, with an error message.'''
        pass

    # If an explicitdir is set, it will be called when an explicitly listed
    # directory is visited.
    explicitdir = None

    # If an traversedir is set, it will be called when a directory discovered
    # by recursive traversal is visited.
    traversedir = None

    def abs(self, f):
        '''Convert a repo path back to path that is relative to the root of the
        matcher.'''
        return f

    def rel(self, f):
        '''Convert repo path back to path that is relative to cwd of matcher.'''
        return util.pathto(self._root, self._cwd, f)

    def uipath(self, f):
        '''Convert repo path to a display path.  If patterns or -I/-X were used
        to create this matcher, the display path will be relative to cwd.
        Otherwise it is relative to the root of the repo.'''
        return (self._pathrestricted and self.rel(f)) or self.abs(f)

    def files(self):
        '''Explicitly listed files or patterns or roots:
        if no patterns or .always(): empty list,
        if exact: list exact files,
        if not .anypats(): list all files and dirs,
        else: optimal roots'''
        return self._files

    @propertycache
    def _dirs(self):
        # all ancestor directories of the file roots, plus '.'
        return set(util.dirs(self._fileroots)) | set(['.'])

    def visitdir(self, dir):
        # a directory is worth visiting if it is a root, lies under a root,
        # or contains a root (or the matcher has no roots at all)
        return (not self._fileroots or '.' in self._fileroots or
                dir in self._fileroots or dir in self._dirs or
                any(parentdir in self._fileroots
                    for parentdir in util.finddirs(dir)))

    def exact(self, f):
        '''Returns True if f is in .files().'''
        return f in self._fileroots

    def anypats(self):
        '''Matcher uses patterns or include/exclude.'''
        return self._anypats

    def always(self):
        '''Matcher will match everything and .files() will be empty
        - optimization might be possible and necessary.'''
        return self._always

    def ispartial(self):
        '''True if the matcher won't always match.

        Although it's just the inverse of _always in this implementation,
        an extenion such as narrowhg might make it return something
        slightly different.'''
        return not self._always

    def isexact(self):
        return self.matchfn == self.exact

    def _normalize(self, patterns, default, root, cwd, auditor):
        '''Convert 'kind:pat' from the patterns list to tuples with kind and
        normalized and rooted patterns and with listfiles expanded.'''
        kindpats = []
        for kind, pat in [_patsplit(p, default) for p in patterns]:
            if kind in ('glob', 'relpath'):
                pat = pathutil.canonpath(root, cwd, pat, auditor)
            elif kind in ('relglob', 'path'):
                pat = util.normpath(pat)
            elif kind in ('listfile', 'listfile0'):
                try:
                    files = util.readfile(pat)
                    if kind == 'listfile0':
                        files = files.split('\0')
                    else:
                        files = files.splitlines()
                    files = [f for f in files if f]
                except EnvironmentError:
                    raise util.Abort(_("unable to read file list (%s)") % pat)
                # recurse so patterns read from the file are normalized too
                kindpats += self._normalize(files, default, root, cwd, auditor)
                continue
            # else: re or relre - which cannot be normalized
            kindpats.append((kind, pat))
        return kindpats
230 230
def exact(root, cwd, files):
    '''Return a matcher that matches exactly the given file names.'''
    return match(root, cwd, files, exact=True)
233 233
def always(root, cwd):
    '''Return a matcher that matches everything (no patterns).'''
    return match(root, cwd, [])
236 236
class narrowmatcher(match):
    """Adapt a matcher to work on a subdirectory only.

    The paths are remapped to remove/insert the path as needed:

    >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
    >>> m2 = narrowmatcher('sub', m1)
    >>> bool(m2('a.txt'))
    False
    >>> bool(m2('b.txt'))
    True
    >>> bool(m2.matchfn('a.txt'))
    False
    >>> bool(m2.matchfn('b.txt'))
    True
    >>> m2.files()
    ['b.txt']
    >>> m2.exact('b.txt')
    True
    >>> util.pconvert(m2.rel('b.txt'))
    'sub/b.txt'
    >>> def bad(f, msg):
    ...     print "%s: %s" % (f, msg)
    >>> m1.bad = bad
    >>> m2.bad('x.txt', 'No such file')
    sub/x.txt: No such file
    >>> m2.abs('c.txt')
    'sub/c.txt'
    """

    def __init__(self, path, matcher):
        self._root = matcher._root
        self._cwd = matcher._cwd
        self._path = path
        self._matcher = matcher
        self._always = matcher._always
        self._pathrestricted = matcher._pathrestricted

        # keep only the files under 'path', with the 'path/' prefix stripped
        self._files = [f[len(path) + 1:] for f in matcher._files
                       if f.startswith(path + "/")]
        self._anypats = matcher._anypats
        # delegate matching to the wrapped matcher with the prefix restored
        self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
        self._fileroots = set(self._files)

    def abs(self, f):
        return self._matcher.abs(self._path + "/" + f)

    def bad(self, f, msg):
        self._matcher.bad(self._path + "/" + f, msg)

    def rel(self, f):
        return self._matcher.rel(self._path + "/" + f)
289 289
class icasefsmatcher(match):
    """A matcher for wdir on case insensitive filesystems, which normalizes the
    given patterns to the case in the filesystem.
    """

    def __init__(self, root, cwd, patterns, include, exclude, default, auditor,
                 ctx, listsubrepos=False):
        init = super(icasefsmatcher, self).__init__
        self._dsnormalize = ctx.repo().dirstate.normalize

        # the base __init__ calls our _normalize(), which stashes the
        # pre-normalization kindpats in self._kp (used below)
        init(root, cwd, patterns, include, exclude, default, auditor=auditor,
             ctx=ctx, listsubrepos=listsubrepos)

        # m.exact(file) must be based off of the actual user input, otherwise
        # inexact case matches are treated as exact, and not noted without -v.
        if self._files:
            self._fileroots = set(_roots(self._kp))

    def _normalize(self, patterns, default, root, cwd, auditor):
        self._kp = super(icasefsmatcher, self)._normalize(patterns, default,
                                                          root, cwd, auditor)
        kindpats = []
        for kind, pats in self._kp:
            if kind not in ('re', 'relre'):  # regex can't be normalized
                pats = self._dsnormalize(pats)
            kindpats.append((kind, pats))
        return kindpats
317 317
def patkind(pattern, default=None):
    '''If pattern is 'kind:pat' with a known kind, return kind.'''
    kind, _pat = _patsplit(pattern, default)
    return kind
321 321
322 322 def _patsplit(pattern, default):
323 323 """Split a string into the optional pattern kind prefix and the actual
324 324 pattern."""
325 325 if ':' in pattern:
326 326 kind, pat = pattern.split(':', 1)
327 327 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
328 328 'listfile', 'listfile0', 'set'):
329 329 return kind, pat
330 330 return default, pattern
331 331
def _globre(pat):
    r'''Convert an extended glob string to a regexp string.

    >>> print _globre(r'?')
    .
    >>> print _globre(r'*')
    [^/]*
    >>> print _globre(r'**')
    .*
    >>> print _globre(r'**/a')
    (?:.*/)?a
    >>> print _globre(r'a/**/b')
    a\/(?:.*/)?b
    >>> print _globre(r'[a*?!^][^b][!c]')
    [a*?!^][\^b][^c]
    >>> print _globre(r'{a,b}')
    (?:a|b)
    >>> print _globre(r'.\*\?')
    \.\*\?
    '''
    i, n = 0, len(pat)
    res = ''
    group = 0  # depth of currently-open {...} alternation groups
    escape = util.re.escape
    def peek():
        # next character of pat, or False when past the end
        return i < n and pat[i]
    while i < n:
        c = pat[i]
        i += 1
        if c not in '*?[{},\\':
            # ordinary character: escape it for the regexp
            res += escape(c)
        elif c == '*':
            if peek() == '*':
                # '**' crosses directory boundaries
                i += 1
                if peek() == '/':
                    # '**/' matches any (possibly empty) directory prefix
                    i += 1
                    res += '(?:.*/)?'
                else:
                    res += '.*'
            else:
                # a single '*' stops at a path separator
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            # character class: scan for the closing ']'; a leading '!'
            # or ']' is part of the class body
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' as a literal
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation '!' becomes regexp negation '^'
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    # a literal leading '^' must be escaped in the regexp
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            # open an alternation group: {a,b} -> (?:a|b)
            group += 1
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group -= 1
        elif c == ',' and group:
            # ',' separates alternatives only inside a {...} group
            res += '|'
        elif c == '\\':
            # backslash escapes the following character, if any
            p = peek()
            if p:
                i += 1
                res += escape(p)
            else:
                res += escape(c)
        else:
            # '}' or ',' outside a group is a literal
            res += escape(c)
    return res
409 409
def _regex(kind, pat, globsuffix):
    '''Convert a (normalized) pattern of any kind into a regular expression.
    globsuffix is appended to the regexp of globs.'''
    if not pat:
        return ''
    if kind == 're':
        return pat
    elif kind == 'path':
        # anchored at the root; matches the path or anything below it
        return '^' + util.re.escape(pat) + '(?:/|$)'
    elif kind == 'relglob':
        # glob that may match at any directory depth
        return '(?:|.*/)' + _globre(pat) + globsuffix
    elif kind == 'relpath':
        return util.re.escape(pat) + '(?:/|$)'
    elif kind == 'relre':
        return pat if pat.startswith('^') else '.*' + pat
    # 'glob' and anything else: rooted glob
    return _globre(pat) + globsuffix
428 428
def _buildmatch(ctx, kindpats, globsuffix, listsubrepos):
    '''Return regexp string and a matcher function for kindpats.
    globsuffix is appended to the regexp of globs.'''
    fset, kindpats = _expandsets(kindpats, ctx, listsubrepos)
    if not kindpats:
        # nothing but filesets: membership test against the expanded set
        return "", fset.__contains__

    regex, mf = _buildregexmatch(kindpats, globsuffix)
    if not fset:
        return regex, mf
    # a file matches if it is in a fileset OR matches the regexp
    return regex, lambda f: f in fset or mf(f)
440 440
def _buildregexmatch(kindpats, globsuffix):
    """Build a match function from a list of kinds and kindpats,
    return regexp string and a matcher function."""
    try:
        # fold every pattern into one big alternation
        regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
                                     for (k, p) in kindpats])
        if len(regex) > 20000:
            # guard against regex-engine size limits on huge patterns
            raise OverflowError
        return regex, _rematcher(regex)
    except OverflowError:
        # We're using a Python with a tiny regex engine and we
        # made it explode, so we'll divide the pattern list in two
        # until it works
        l = len(kindpats)
        if l < 2:
            raise
        regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
        regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
        # NOTE(review): 'regex' here is the oversized combined pattern
        # built above; only the matcher function is split in two
        return regex, lambda s: a(s) or b(s)
    except re.error:
        # recompile each pattern individually to report the offender
        for k, p in kindpats:
            try:
                _rematcher('(?:%s)' % _regex(k, p, globsuffix))
            except re.error:
                raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
        raise util.Abort(_("invalid pattern"))
467 467
468 468 def _roots(kindpats):
469 469 '''return roots and exact explicitly listed files from patterns
470 470
471 471 >>> _roots([('glob', 'g/*'), ('glob', 'g'), ('glob', 'g*')])
472 472 ['g', 'g', '.']
473 473 >>> _roots([('relpath', 'r'), ('path', 'p/p'), ('path', '')])
474 474 ['r', 'p/p', '.']
475 475 >>> _roots([('relglob', 'rg*'), ('re', 're/'), ('relre', 'rr')])
476 476 ['.', '.', '.']
477 477 '''
478 478 r = []
479 479 for kind, pat in kindpats:
480 480 if kind == 'glob': # find the non-glob prefix
481 481 root = []
482 482 for p in pat.split('/'):
483 483 if '[' in p or '{' in p or '*' in p or '?' in p:
484 484 break
485 485 root.append(p)
486 486 r.append('/'.join(root) or '.')
487 487 elif kind in ('relpath', 'path'):
488 488 r.append(pat or '.')
489 489 else: # relglob, re, relre
490 490 r.append('.')
491 491 return r
492 492
493 493 def _anypats(kindpats):
494 494 for kind, pat in kindpats:
495 495 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
496 496 return True
497 497
_commentre = None  # lazily-compiled regex used to strip '#' comments
499 499
def readpatternfile(filepath, warn):
    '''parse a pattern file, returning a list of
    patterns. These patterns should be given to compile()
    to be validated and converted into a match function.

    warn is called with a message for each invalid 'syntax:' line.
    '''
    # maps a 'syntax:' name to the pattern-kind prefix it selects
    syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
    syntax = 'relre:'
    patterns = []

    fp = open(filepath)
    try:
        for line in fp:
            if "#" in line:
                global _commentre
                if not _commentre:
                    _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
                # remove comments prefixed by an even number of escapes
                line = _commentre.sub(r'\1', line)
                # fixup properly escaped comments that survived the above
                line = line.replace("\\#", "#")
            line = line.rstrip()
            if not line:
                continue

            if line.startswith('syntax:'):
                # switch the default syntax for subsequent lines
                s = line[7:].strip()
                try:
                    syntax = syntaxes[s]
                except KeyError:
                    warn(_("%s: ignoring invalid syntax '%s'\n")
                         % (filepath, s))
                continue

            # a line may override the file-level syntax with its own
            # 'name:' or kind prefix
            linesyntax = syntax
            for s, rels in syntaxes.iteritems():
                if line.startswith(rels):
                    linesyntax = rels
                    line = line[len(rels):]
                    break
                elif line.startswith(s + ':'):
                    linesyntax = rels
                    line = line[len(s) + 1:]
                    break
            patterns.append(linesyntax + line)
    finally:
        # always release the file handle, even if parsing raises
        fp.close()
    return patterns
General Comments 0
You need to be logged in to leave comments. Login now