##// END OF EJS Templates
run commit and update hooks after command completion (issue1827)...
Sune Foldager -
r10492:0e64d814 stable
parent child Browse files
Show More
@@ -1,566 +1,568 b''
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2009 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a DSCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM, still it may be useful in very small projects based on single
14 14 # files (like LaTeX packages), that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Configuration is done in the [keyword] and [keywordmaps] sections of
39 39 hgrc files.
40 40
41 41 Example::
42 42
43 43 [keyword]
44 44 # expand keywords in every python file except those matching "x*"
45 45 **.py =
46 46 x* = ignore
47 47
48 48 NOTE: the more specific you are in your filename patterns the less you
49 49 lose speed in huge repositories.
50 50
51 51 For [keywordmaps] template mapping and expansion demonstration and
52 52 control run "hg kwdemo". See "hg help templates" for a list of
53 53 available templates and filters.
54 54
55 55 An additional date template filter {date|utcdate} is provided. It
56 56 returns a date like "2006/09/18 15:13:13".
57 57
58 58 The default template mappings (view with "hg kwdemo -d") can be
59 59 replaced with customized keywords and templates. Again, run "hg
60 60 kwdemo" to control the results of your config changes.
61 61
62 62 Before changing/disabling active keywords, run "hg kwshrink" to avoid
63 63 the risk of inadvertently storing expanded keywords in the change
64 64 history.
65 65
66 66 To force expansion after enabling it, or a configuration change, run
67 67 "hg kwexpand".
68 68
69 69 Also, when committing with the record extension or using mq's qrecord,
70 70 be aware that keywords cannot be updated. Again, run "hg kwexpand" on
71 71 the files in question to update keyword expansions after all changes
72 72 have been checked in.
73 73
74 74 Expansions spanning more than one line and incremental expansions,
75 75 like CVS' $Log$, are not supported. A keyword template map "Log =
76 76 {desc}" expands to the first line of the changeset description.
77 77 '''
78 78
79 79 from mercurial import commands, cmdutil, dispatch, filelog, revlog, extensions
80 80 from mercurial import patch, localrepo, templater, templatefilters, util, match
81 81 from mercurial.hgweb import webcommands
82 82 from mercurial.lock import release
83 83 from mercurial.node import nullid
84 84 from mercurial.i18n import _
85 85 import re, shutil, tempfile
86 86
# kwdemo creates its own throw-away repository, so it may run without one
commands.optionalrepo += ' kwdemo'

# commands for which keyword machinery is never engaged
nokwcommands = ('add addremove annotate bundle copy export grep incoming init'
                ' log outgoing push rename rollback tip verify'
                ' convert email glog')

# commands that must see unexpanded keywords when reading the filelog
# and shrink keywords read back from the working directory
restricted = ('merge record resolve qfold qimport qnew qpush qrefresh qrecord'
              ' transplant')


def utcdate(x):
    '''cvs-like UTC date filter: (unixtime, offset) -> "YYYY/mm/dd HH:MM:SS"'''
    return util.datestr((x[0], 0), '%Y/%m/%d %H:%M:%S')

# shared state between uisetup/reposetup and the command functions
kwtools = {'templater': None, 'hgcmd': '', 'inc': [], 'exc': ['.hg*']}
104 104
105 105
class kwtemplater(object):
    '''
    Keyword expansion engine: builds the keyword regex from the
    configured template maps and substitutes in both directions
    (expand on read, shrink on write/compare).
    '''
    # default RCS/CVS-like keyword template mappings
    templates = {
        'Revision': '{node|short}',
        'Author': '{author|user}',
        'Date': '{date|utcdate}',
        'RCSfile': '{file|basename},v',
        'RCSFile': '{file|basename},v', # kept for backwards compatibility
                                        # with hg-keyword
        'Source': '{root}/{file},v',
        'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
        'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
    }

    def __init__(self, ui, repo):
        self.ui = ui
        self.repo = repo
        self.match = match.match(repo.root, '', [],
                                 kwtools['inc'], kwtools['exc'])
        self.restrict = kwtools['hgcmd'] in restricted.split()

        usermaps = self.ui.configitems('keywordmaps')
        if usermaps: # user configuration replaces the defaults wholesale
            self.templates = dict((k, templater.parsestring(v, False))
                                  for k, v in usermaps)
        escaped = [re.escape(k) for k in self.templates.keys()]
        # matches "$Keyword$" as well as an already expanded "$Keyword: ... $"
        self.re_kw = re.compile(r'\$(%s)(: [^$\n\r]*? )??\$'
                                % '|'.join(escaped))

        templatefilters.filters['utcdate'] = utcdate
        self.ct = cmdutil.changeset_templater(self.ui, self.repo,
                                              False, None, '', False)

    def substitute(self, data, path, ctx, subfunc):
        '''Runs subfunc (re sub/subn) over data, expanding every keyword
        through the changeset templater for ctx.'''
        def kwsub(mo):
            kw = mo.group(1)
            self.ct.use_template(self.templates[kw])
            self.ui.pushbuffer()
            self.ct.show(ctx, root=self.repo.root, file=path)
            expansion = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, expansion)
        return subfunc(kwsub, data)

    def expand(self, path, node, data):
        '''Returns data with keywords expanded; a no-op in restricted
        mode, for unmatched paths and for binary data.'''
        if self.restrict or not self.match(path) or util.binary(data):
            return data
        ctx = self.repo.filectx(path, fileid=node).changectx()
        return self.substitute(data, path, ctx, self.re_kw.sub)

    def iskwfile(self, path, flagfunc):
        '''Returns true if path matches the [keyword] patterns
        and is not a symbolic link.
        Caveat: localrepository._link fails on Windows.'''
        return self.match(path) and 'l' not in flagfunc(path)

    def overwrite(self, node, expand, files):
        '''Overwrites selected files expanding/shrinking keywords.'''
        ctx = self.repo[node]
        mf = ctx.manifest()
        if node is not None: # commit
            files = [f for f in ctx.files() if f in mf]
            notify = self.ui.debug
        else: # kwexpand/kwshrink
            notify = self.ui.note
        candidates = [f for f in files if self.iskwfile(f, ctx.flags)]
        if candidates:
            self.restrict = True # do not expand when reading
            if expand:
                msg = _('overwriting %s expanding keywords\n')
            else:
                msg = _('overwriting %s shrinking keywords\n')
            for f in candidates:
                flog = self.repo.file(f)
                data = flog.read(mf[f])
                if util.binary(data):
                    continue
                if expand:
                    if node is None:
                        ctx = self.repo.filectx(f, fileid=mf[f]).changectx()
                    data, found = self.substitute(data, f, ctx,
                                                  self.re_kw.subn)
                else:
                    found = self.re_kw.search(data)
                if found:
                    notify(msg % f)
                    self.repo.wwrite(f, data, mf.flags(f))
                    if node is None:
                        self.repo.dirstate.normal(f)
            self.restrict = False

    def shrinktext(self, text):
        '''Unconditionally strips all keyword substitutions from text.'''
        return self.re_kw.sub(r'$\1$', text)

    def shrink(self, fname, text):
        '''Returns text with keyword substitutions removed for matched,
        non-binary files.'''
        if self.match(fname) and not util.binary(text):
            return self.shrinktext(text)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return self.shrinktext(text).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from the working
        directory with keyword substitutions removed.'''
        return self.restrict and self.shrink(fname, data) or data
221 221
class kwfilelog(filelog.filelog):
    '''
    Filelog subclass hooking read, add and cmp so that keywords are
    always stored unexpanded and expanded only when read back.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        self.kwt = kwt      # the repository's kwtemplater
        self.path = path

    def read(self, node):
        '''Expands keywords when reading from the filelog.'''
        data = super(kwfilelog, self).read(node)
        return self.kwt.expand(self.path, node, data)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Shrinks keyword substitutions before storing in the filelog.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Shrinks keyword substitutions for comparison.'''
        text = self.kwt.shrink(self.path, text)
        if self.renamed(node):
            stored = super(kwfilelog, self).read(node)
            return stored != text
        return revlog.revlog.cmp(self, node, text)
249 249
def _status(ui, repo, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if not kwt:
        # no templater: keyword machinery was never engaged for this repo
        if ui.configitems('keyword'):
            raise util.Abort(_('[keyword] patterns cannot match'))
        raise util.Abort(_('no [keyword] patterns configured'))
    unknown = (opts.get('unknown') or opts.get('all')
               or opts.get('untracked'))
    return repo.status(match=cmdutil.match(repo, pats, opts), clean=True,
                       unknown=unknown)
261 261
def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects clean files and passes them to kwtemplater.overwrite.'''
    if repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    kwt = kwtools['templater']
    status = _status(ui, repo, kwt, *pats, **opts)
    # status[:4] is (modified, added, removed, deleted); refuse to
    # rewrite files on top of uncommitted changes
    if any(status[:4]):
        raise util.Abort(_('outstanding uncommitted changes'))
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        kwt.overwrite(None, expand, status[6]) # status[6]: clean files
    finally:
        release(lock, wlock)
278 278
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See "hg help templates" for information on templates and filters.
    '''
    def demoitems(section, items):
        # dump a config section in hgrc syntax
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    msg = 'hg keyword config and expansion example'
    fn = 'demo.txt'
    branchname = 'demobranch'
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(ui, tmpdir, True)
    ui.setconfig('keyword', fn, '')

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            ui.status(_('\toverriding default template maps\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.opener('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        ui.status(_('\n\tconfiguration using default keyword template maps\n'))
        kwmaps = kwtemplater.templates
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v)
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        kwmaps = dict(uikwmaps) or kwtemplater.templates

    uisetup(ui)
    reposetup(ui, repo)
    for k, v in ui.configitems('extensions'):
        if k.endswith('keyword'):
            extension = '%s = %s' % (k, v)
            break
    ui.write('[extensions]\n%s\n' % extension)
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordmaps', kwmaps.iteritems())
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener(fn, 'w').write(keywords)
    repo.add([fn])
    path = repo.wjoin(fn)
    ui.note(_('\nkeywords written to %s:\n') % path)
    ui.note(keywords)
    ui.note('\nhg -R "%s" branch "%s"\n' % (tmpdir, branchname))
    # silence branch command if not verbose
    oldquiet = ui.quiet
    ui.quiet = not ui.verbose
    commands.branch(ui, repo, branchname)
    ui.quiet = oldquiet
    for name, cmd in ui.configitems('hooks'):
        if 'commit' in name.split('.', 1)[0]:
            repo.ui.setconfig('hooks', name, '')
    ui.note(_('unhooked all commit hooks\n'))
    ui.note('hg -R "%s" ci -m "%s"\n' % (tmpdir, msg))
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    ui.debug('\nremoving temporary repository %s\n' % tmpdir)
    shutil.rmtree(tmpdir, ignore_errors=True)
364 364
def expand(ui, repo, *pats, **opts):
    '''expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    '''
    # third argument True selects expansion
    _kwfwrite(ui, repo, True, *pats, **opts)
374 374
def files(ui, repo, *pats, **opts):
    '''show files configured for keyword expansion

    List which files in the working directory are matched by the
    [keyword] configuration patterns.

    Useful to prevent inadvertent keyword expansion and to speed up
    execution by including only files that are actual candidates for
    expansion.

    See "hg help keyword" on how to construct patterns both for
    inclusion and exclusion of files.

    With -A/--all and -v/--verbose the codes used to show the status
    of files are::

      K = keyword expansion candidate
      k = keyword expansion candidate (not tracked)
      I = ignored
      i = ignored (not tracked)
    '''
    kwt = kwtools['templater']
    status = _status(ui, repo, kwt, *pats, **opts)
    cwd = pats and repo.getcwd() or ''
    modified, added, removed, deleted, unknown, ignored, clean = status
    files = []
    if not (opts.get('unknown') or opts.get('untracked')) or opts.get('all'):
        files = sorted(modified + added + clean)
    wctx = repo[None]
    kwfiles = [f for f in files if kwt.iskwfile(f, wctx.flags)]
    kwunknown = [f for f in unknown if kwt.iskwfile(f, wctx.flags)]
    # selected file lists in K, k, I, i order
    if not opts.get('ignore') or opts.get('all'):
        showfiles = kwfiles, kwunknown
    else:
        showfiles = [], []
    if opts.get('all') or opts.get('ignore'):
        showfiles += ([f for f in files if f not in kwfiles],
                      [f for f in unknown if f not in kwunknown])
    for flagchar, filenames in zip('KkIi', showfiles):
        if opts.get('all') or ui.verbose:
            fmt = '%s %%s\n' % flagchar
        else:
            fmt = '%s\n'
        for f in filenames:
            ui.write(fmt % repo.pathto(f, cwd))
417 417
def shrink(ui, repo, *pats, **opts):
    '''revert expanded keywords in the working directory

    Run before changing/disabling active keywords or if you experience
    problems with "hg import" or "hg merge".

    kwshrink refuses to run if given files contain local changes.
    '''
    # third argument False selects shrinking
    _kwfwrite(ui, repo, False, *pats, **opts)
428 428
429 429
def uisetup(ui):
    '''Collects [keyword] configuration in kwtools and, when any
    inclusion pattern is configured, monkeypatches dispatch._parse
    to learn which hg command is running.'''

    for pat, opt in ui.configitems('keyword'):
        if opt == 'ignore':
            kwtools['exc'].append(pat)
        else:
            kwtools['inc'].append(pat)

    if kwtools['inc']:
        def kwdispatch_parse(orig, ui, args):
            '''Records the running hg command for later use.'''
            cmd, func, args, options, cmdoptions = orig(ui, args)
            kwtools['hgcmd'] = cmd
            return cmd, func, args, options, cmdoptions

        extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
448 448
def reposetup(ui, repo):
    '''Sets up repo as kwrepo for keyword substitution:
    - file() returns kwfilelog instead of filelog for matched files;
    - commit() postpones configured commit hooks until after the
      committed files have been overwritten with expanded keywords
      (issue1827);
    - patch and webcommands are monkeypatched to avoid expansion
      where it would be harmful.'''

    try:
        if (not repo.local() or not kwtools['inc']
            or kwtools['hgcmd'] in nokwcommands.split()
            or '.hg' in util.splitpath(repo.root)
            or repo._url.startswith('bundle:')):
            return
    except AttributeError:
        pass

    kwtools['templater'] = kwt = kwtemplater(ui, repo)

    class kwrepo(repo.__class__):
        def file(self, f):
            if f[0] == '/':
                f = f[1:]
            return kwfilelog(self.sopener, kwt, f)

        def wread(self, filename):
            data = super(kwrepo, self).wread(filename)
            return kwt.wread(filename, data)

        def commit(self, *args, **opts):
            # use custom commitctx for user commands
            # other extensions can still wrap repo.commitctx directly
            self.commitctx = self.kwcommitctx
            try:
                self._kwcommithooks = {}
                n = super(kwrepo, self).commit(*args, **opts)
                if self._kwcommithooks:
                    # restore and run the commit hooks that kwcommitctx
                    # postponed, now that keywords are overwritten
                    xp1, xp2 = self._kwxp1, self._kwxp2
                    for name, cmd in self._kwcommithooks.iteritems():
                        ui.setconfig('hooks', name, cmd)
                    self.hook('commit', node=n, parent1=xp1, parent2=xp2)
                return n
            finally:
                del self.commitctx

        def kwcommitctx(self, ctx, error=False):
            wlock = lock = None
            try:
                wlock = self.wlock()
                lock = self.lock()
                # store and postpone commit hooks
                for name, cmd in ui.configitems('hooks'):
                    if name.split('.', 1)[0] == 'commit':
                        self._kwcommithooks[name] = cmd
                        ui.setconfig('hooks', name, None)
                if self._kwcommithooks:
                    # store parents for commit hooks
                    p1, p2 = ctx.p1(), ctx.p2()
                    self._kwxp1, self._kwxp2 = p1.hex(), p2 and p2.hex() or ''

                n = super(kwrepo, self).commitctx(ctx, error)

                kwt.overwrite(n, True, None)
                return n
            finally:
                release(lock, wlock)

    # monkeypatches
    def kwpatchfile_init(orig, self, ui, fname, opener,
                         missing=False, eol=None):
        '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
        rejects or conflicts due to expanded keywords in working dir.'''
        orig(self, ui, fname, opener, missing, eol)
        # shrink keywords read from working dir
        self.lines = kwt.shrinklines(self.fname, self.lines)

    def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
                opts=None):
        '''Monkeypatch patch.diff to avoid expansion except when
        comparing against working dir.'''
        if node2 is not None:
            kwt.match = util.never
        elif node1 is not None and node1 != repo['.'].node():
            kwt.restrict = True
        return orig(repo, node1, node2, match, changes, opts)

    def kwweb_skip(orig, web, req, tmpl):
        '''Wraps webcommands.x turning off keyword expansion.'''
        kwt.match = util.never
        return orig(web, req, tmpl)

    repo.__class__ = kwrepo

    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
    if not kwt.restrict:
        extensions.wrapfunction(patch, 'diff', kw_diff)
    for c in 'annotate changeset rev filediff diff'.split():
        extensions.wrapfunction(webcommands, c, kwweb_skip)
545 547
# command table: maps command name to (function, options, synopsis)
cmdtable = {
    'kwdemo':
        (demo,
         [('d', 'default', None, _('show default keyword template maps')),
          ('f', 'rcfile', '', _('read maps from rcfile'))],
         _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...')),
    'kwexpand':
        (expand, commands.walkopts,
         _('hg kwexpand [OPTION]... [FILE]...')),
    'kwfiles':
        (files,
         [('A', 'all', None, _('show keyword status flags of all files')),
          ('i', 'ignore', None, _('show files excluded from expansion')),
          ('u', 'unknown', None, _('only show unknown (not tracked) files')),
          ('a', 'all', None,
           _('show keyword status flags of all files (DEPRECATED)')),
          ('u', 'untracked', None, _('only show untracked files (DEPRECATED)')),
         ] + commands.walkopts,
         _('hg kwfiles [OPTION]... [FILE]...')),
    'kwshrink':
        (shrink, commands.walkopts,
         _('hg kwshrink [OPTION]... [FILE]...')),
}
@@ -1,2219 +1,2216 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as match_
15 15 import merge as merge_
16 16 import tags as tags_
17 17 from lock import release
18 18 import weakref, stat, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
21 21 class localrepository(repo.repository):
22 22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 23 supported = set('revlogv1 store fncache shared'.split())
24 24
25 25 def __init__(self, baseui, path=None, create=0):
26 26 repo.repository.__init__(self)
27 27 self.root = os.path.realpath(path)
28 28 self.path = os.path.join(self.root, ".hg")
29 29 self.origroot = path
30 30 self.opener = util.opener(self.path)
31 31 self.wopener = util.opener(self.root)
32 32 self.baseui = baseui
33 33 self.ui = baseui.copy()
34 34
35 35 try:
36 36 self.ui.readconfig(self.join("hgrc"), self.root)
37 37 extensions.loadall(self.ui)
38 38 except IOError:
39 39 pass
40 40
41 41 if not os.path.isdir(self.path):
42 42 if create:
43 43 if not os.path.exists(path):
44 44 os.mkdir(path)
45 45 os.mkdir(self.path)
46 46 requirements = ["revlogv1"]
47 47 if self.ui.configbool('format', 'usestore', True):
48 48 os.mkdir(os.path.join(self.path, "store"))
49 49 requirements.append("store")
50 50 if self.ui.configbool('format', 'usefncache', True):
51 51 requirements.append("fncache")
52 52 # create an invalid changelog
53 53 self.opener("00changelog.i", "a").write(
54 54 '\0\0\0\2' # represents revlogv2
55 55 ' dummy changelog to prevent using the old repo layout'
56 56 )
57 57 reqfile = self.opener("requires", "w")
58 58 for r in requirements:
59 59 reqfile.write("%s\n" % r)
60 60 reqfile.close()
61 61 else:
62 62 raise error.RepoError(_("repository %s not found") % path)
63 63 elif create:
64 64 raise error.RepoError(_("repository %s already exists") % path)
65 65 else:
66 66 # find requirements
67 67 requirements = set()
68 68 try:
69 69 requirements = set(self.opener("requires").read().splitlines())
70 70 except IOError, inst:
71 71 if inst.errno != errno.ENOENT:
72 72 raise
73 73 for r in requirements - self.supported:
74 74 raise error.RepoError(_("requirement '%s' not supported") % r)
75 75
76 76 self.sharedpath = self.path
77 77 try:
78 78 s = os.path.realpath(self.opener("sharedpath").read())
79 79 if not os.path.exists(s):
80 80 raise error.RepoError(
81 81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 82 self.sharedpath = s
83 83 except IOError, inst:
84 84 if inst.errno != errno.ENOENT:
85 85 raise
86 86
87 87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 88 self.spath = self.store.path
89 89 self.sopener = self.store.opener
90 90 self.sjoin = self.store.join
91 91 self.opener.createmode = self.store.createmode
92 92 self.sopener.options = {}
93 93
94 94 # These two define the set of tags for this repository. _tags
95 95 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 96 # 'local'. (Global tags are defined by .hgtags across all
97 97 # heads, and local tags are defined in .hg/localtags.) They
98 98 # constitute the in-memory cache of tags.
99 99 self._tags = None
100 100 self._tagtypes = None
101 101
102 102 self._branchcache = None # in UTF-8
103 103 self._branchcachetip = None
104 104 self.nodetagscache = None
105 105 self.filterpats = {}
106 106 self._datafilters = {}
107 107 self._transref = self._lockref = self._wlockref = None
108 108
109 109 @propertycache
110 110 def changelog(self):
111 111 c = changelog.changelog(self.sopener)
112 112 if 'HG_PENDING' in os.environ:
113 113 p = os.environ['HG_PENDING']
114 114 if p.startswith(self.root):
115 115 c.readpending('00changelog.i.a')
116 116 self.sopener.options['defversion'] = c.version
117 117 return c
118 118
119 119 @propertycache
120 120 def manifest(self):
121 121 return manifest.manifest(self.sopener)
122 122
123 123 @propertycache
124 124 def dirstate(self):
125 125 return dirstate.dirstate(self.opener, self.ui, self.root)
126 126
127 127 def __getitem__(self, changeid):
128 128 if changeid is None:
129 129 return context.workingctx(self)
130 130 return context.changectx(self, changeid)
131 131
132 132 def __contains__(self, changeid):
133 133 try:
134 134 return bool(self.lookup(changeid))
135 135 except error.RepoLookupError:
136 136 return False
137 137
138 138 def __nonzero__(self):
139 139 return True
140 140
141 141 def __len__(self):
142 142 return len(self.changelog)
143 143
144 144 def __iter__(self):
145 145 for i in xrange(len(self)):
146 146 yield i
147 147
148 148 def url(self):
149 149 return 'file:' + self.root
150 150
151 151 def hook(self, name, throw=False, **args):
152 152 return hook.hook(self.ui, self, name, throw, **args)
153 153
154 154 tag_disallowed = ':\r\n'
155 155
156 156 def _tag(self, names, node, message, local, user, date, extra={}):
157 157 if isinstance(names, str):
158 158 allchars = names
159 159 names = (names,)
160 160 else:
161 161 allchars = ''.join(names)
162 162 for c in self.tag_disallowed:
163 163 if c in allchars:
164 164 raise util.Abort(_('%r cannot be used in a tag name') % c)
165 165
166 166 for name in names:
167 167 self.hook('pretag', throw=True, node=hex(node), tag=name,
168 168 local=local)
169 169
170 170 def writetags(fp, names, munge, prevtags):
171 171 fp.seek(0, 2)
172 172 if prevtags and prevtags[-1] != '\n':
173 173 fp.write('\n')
174 174 for name in names:
175 175 m = munge and munge(name) or name
176 176 if self._tagtypes and name in self._tagtypes:
177 177 old = self._tags.get(name, nullid)
178 178 fp.write('%s %s\n' % (hex(old), m))
179 179 fp.write('%s %s\n' % (hex(node), m))
180 180 fp.close()
181 181
182 182 prevtags = ''
183 183 if local:
184 184 try:
185 185 fp = self.opener('localtags', 'r+')
186 186 except IOError:
187 187 fp = self.opener('localtags', 'a')
188 188 else:
189 189 prevtags = fp.read()
190 190
191 191 # local tags are stored in the current charset
192 192 writetags(fp, names, None, prevtags)
193 193 for name in names:
194 194 self.hook('tag', node=hex(node), tag=name, local=local)
195 195 return
196 196
197 197 try:
198 198 fp = self.wfile('.hgtags', 'rb+')
199 199 except IOError:
200 200 fp = self.wfile('.hgtags', 'ab')
201 201 else:
202 202 prevtags = fp.read()
203 203
204 204 # committed tags are stored in UTF-8
205 205 writetags(fp, names, encoding.fromlocal, prevtags)
206 206
207 207 if '.hgtags' not in self.dirstate:
208 208 self.add(['.hgtags'])
209 209
210 210 m = match_.exact(self.root, '', ['.hgtags'])
211 211 tagnode = self.commit(message, user, date, extra=extra, match=m)
212 212
213 213 for name in names:
214 214 self.hook('tag', node=hex(node), tag=name, local=local)
215 215
216 216 return tagnode
217 217
218 218 def tag(self, names, node, message, local, user, date):
219 219 '''tag a revision with one or more symbolic names.
220 220
221 221 names is a list of strings or, when adding a single tag, names may be a
222 222 string.
223 223
224 224 if local is True, the tags are stored in a per-repository file.
225 225 otherwise, they are stored in the .hgtags file, and a new
226 226 changeset is committed with the change.
227 227
228 228 keyword arguments:
229 229
230 230 local: whether to store tags in non-version-controlled file
231 231 (default False)
232 232
233 233 message: commit message to use if committing
234 234
235 235 user: name of user to use if committing
236 236
237 237 date: date tuple to use if committing'''
238 238
239 239 for x in self.status()[:5]:
240 240 if '.hgtags' in x:
241 241 raise util.Abort(_('working copy of .hgtags is changed '
242 242 '(please commit .hgtags manually)'))
243 243
244 244 self.tags() # instantiate the cache
245 245 self._tag(names, node, message, local, user, date)
246 246
247 247 def tags(self):
248 248 '''return a mapping of tag to node'''
249 249 if self._tags is None:
250 250 (self._tags, self._tagtypes) = self._findtags()
251 251
252 252 return self._tags
253 253
254 254 def _findtags(self):
255 255 '''Do the hard work of finding tags. Return a pair of dicts
256 256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
257 257 maps tag name to a string like \'global\' or \'local\'.
258 258 Subclasses or extensions are free to add their own tags, but
259 259 should be aware that the returned dicts will be retained for the
260 260 duration of the localrepo object.'''
261 261
262 262 # XXX what tagtype should subclasses/extensions use? Currently
263 263 # mq and bookmarks add tags, but do not set the tagtype at all.
264 264 # Should each extension invent its own tag type? Should there
265 265 # be one tagtype for all such "virtual" tags? Or is the status
266 266 # quo fine?
267 267
268 268 alltags = {} # map tag name to (node, hist)
269 269 tagtypes = {}
270 270
271 271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
272 272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
273 273
274 274 # Build the return dicts. Have to re-encode tag names because
275 275 # the tags module always uses UTF-8 (in order not to lose info
276 276 # writing to the cache), but the rest of Mercurial wants them in
277 277 # local encoding.
278 278 tags = {}
279 279 for (name, (node, hist)) in alltags.iteritems():
280 280 if node != nullid:
281 281 tags[encoding.tolocal(name)] = node
282 282 tags['tip'] = self.changelog.tip()
283 283 tagtypes = dict([(encoding.tolocal(name), value)
284 284 for (name, value) in tagtypes.iteritems()])
285 285 return (tags, tagtypes)
286 286
287 287 def tagtype(self, tagname):
288 288 '''
289 289 return the type of the given tag. result can be:
290 290
291 291 'local' : a local tag
292 292 'global' : a global tag
293 293 None : tag does not exist
294 294 '''
295 295
296 296 self.tags()
297 297
298 298 return self._tagtypes.get(tagname)
299 299
300 300 def tagslist(self):
301 301 '''return a list of tags ordered by revision'''
302 302 l = []
303 303 for t, n in self.tags().iteritems():
304 304 try:
305 305 r = self.changelog.rev(n)
306 306 except:
307 307 r = -2 # sort to the beginning of the list if unknown
308 308 l.append((r, t, n))
309 309 return [(t, n) for r, t, n in sorted(l)]
310 310
311 311 def nodetags(self, node):
312 312 '''return the tags associated with a node'''
313 313 if not self.nodetagscache:
314 314 self.nodetagscache = {}
315 315 for t, n in self.tags().iteritems():
316 316 self.nodetagscache.setdefault(n, []).append(t)
317 317 return self.nodetagscache.get(node, [])
318 318
319 319 def _branchtags(self, partial, lrev):
320 320 # TODO: rename this function?
321 321 tiprev = len(self) - 1
322 322 if lrev != tiprev:
323 323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
324 324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
325 325
326 326 return partial
327 327
328 328 def branchmap(self):
329 329 '''returns a dictionary {branch: [branchheads]}'''
330 330 tip = self.changelog.tip()
331 331 if self._branchcache is not None and self._branchcachetip == tip:
332 332 return self._branchcache
333 333
334 334 oldtip = self._branchcachetip
335 335 self._branchcachetip = tip
336 336 if oldtip is None or oldtip not in self.changelog.nodemap:
337 337 partial, last, lrev = self._readbranchcache()
338 338 else:
339 339 lrev = self.changelog.rev(oldtip)
340 340 partial = self._branchcache
341 341
342 342 self._branchtags(partial, lrev)
343 343 # this private cache holds all heads (not just tips)
344 344 self._branchcache = partial
345 345
346 346 return self._branchcache
347 347
348 348 def branchtags(self):
349 349 '''return a dict where branch names map to the tipmost head of
350 350 the branch, open heads come before closed'''
351 351 bt = {}
352 352 for bn, heads in self.branchmap().iteritems():
353 353 tip = heads[-1]
354 354 for h in reversed(heads):
355 355 if 'close' not in self.changelog.read(h)[5]:
356 356 tip = h
357 357 break
358 358 bt[bn] = tip
359 359 return bt
360 360
361 361
362 362 def _readbranchcache(self):
363 363 partial = {}
364 364 try:
365 365 f = self.opener("branchheads.cache")
366 366 lines = f.read().split('\n')
367 367 f.close()
368 368 except (IOError, OSError):
369 369 return {}, nullid, nullrev
370 370
371 371 try:
372 372 last, lrev = lines.pop(0).split(" ", 1)
373 373 last, lrev = bin(last), int(lrev)
374 374 if lrev >= len(self) or self[lrev].node() != last:
375 375 # invalidate the cache
376 376 raise ValueError('invalidating branch cache (tip differs)')
377 377 for l in lines:
378 378 if not l:
379 379 continue
380 380 node, label = l.split(" ", 1)
381 381 partial.setdefault(label.strip(), []).append(bin(node))
382 382 except KeyboardInterrupt:
383 383 raise
384 384 except Exception, inst:
385 385 if self.ui.debugflag:
386 386 self.ui.warn(str(inst), '\n')
387 387 partial, last, lrev = {}, nullid, nullrev
388 388 return partial, last, lrev
389 389
390 390 def _writebranchcache(self, branches, tip, tiprev):
391 391 try:
392 392 f = self.opener("branchheads.cache", "w", atomictemp=True)
393 393 f.write("%s %s\n" % (hex(tip), tiprev))
394 394 for label, nodes in branches.iteritems():
395 395 for node in nodes:
396 396 f.write("%s %s\n" % (hex(node), label))
397 397 f.rename()
398 398 except (IOError, OSError):
399 399 pass
400 400
401 401 def _updatebranchcache(self, partial, start, end):
402 402 # collect new branch entries
403 403 newbranches = {}
404 404 for r in xrange(start, end):
405 405 c = self[r]
406 406 newbranches.setdefault(c.branch(), []).append(c.node())
407 407 # if older branchheads are reachable from new ones, they aren't
408 408 # really branchheads. Note checking parents is insufficient:
409 409 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
410 410 for branch, newnodes in newbranches.iteritems():
411 411 bheads = partial.setdefault(branch, [])
412 412 bheads.extend(newnodes)
413 413 if len(bheads) < 2:
414 414 continue
415 415 newbheads = []
416 416 # starting from tip means fewer passes over reachable
417 417 while newnodes:
418 418 latest = newnodes.pop()
419 419 if latest not in bheads:
420 420 continue
421 421 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
422 422 reachable = self.changelog.reachable(latest, minbhrev)
423 423 bheads = [b for b in bheads if b not in reachable]
424 424 newbheads.insert(0, latest)
425 425 bheads.extend(newbheads)
426 426 partial[branch] = bheads
427 427
428 428 def lookup(self, key):
429 429 if isinstance(key, int):
430 430 return self.changelog.node(key)
431 431 elif key == '.':
432 432 return self.dirstate.parents()[0]
433 433 elif key == 'null':
434 434 return nullid
435 435 elif key == 'tip':
436 436 return self.changelog.tip()
437 437 n = self.changelog._match(key)
438 438 if n:
439 439 return n
440 440 if key in self.tags():
441 441 return self.tags()[key]
442 442 if key in self.branchtags():
443 443 return self.branchtags()[key]
444 444 n = self.changelog._partialmatch(key)
445 445 if n:
446 446 return n
447 447
448 448 # can't find key, check if it might have come from damaged dirstate
449 449 if key in self.dirstate.parents():
450 450 raise error.Abort(_("working directory has unknown parent '%s'!")
451 451 % short(key))
452 452 try:
453 453 if len(key) == 20:
454 454 key = hex(key)
455 455 except:
456 456 pass
457 457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
458 458
459 459 def local(self):
460 460 return True
461 461
462 462 def join(self, f):
463 463 return os.path.join(self.path, f)
464 464
465 465 def wjoin(self, f):
466 466 return os.path.join(self.root, f)
467 467
468 468 def rjoin(self, f):
469 469 return os.path.join(self.root, util.pconvert(f))
470 470
471 471 def file(self, f):
472 472 if f[0] == '/':
473 473 f = f[1:]
474 474 return filelog.filelog(self.sopener, f)
475 475
476 476 def changectx(self, changeid):
477 477 return self[changeid]
478 478
479 479 def parents(self, changeid=None):
480 480 '''get list of changectxs for parents of changeid'''
481 481 return self[changeid].parents()
482 482
483 483 def filectx(self, path, changeid=None, fileid=None):
484 484 """changeid can be a changeset revision, node, or tag.
485 485 fileid can be a file revision or node."""
486 486 return context.filectx(self, path, changeid, fileid)
487 487
488 488 def getcwd(self):
489 489 return self.dirstate.getcwd()
490 490
491 491 def pathto(self, f, cwd=None):
492 492 return self.dirstate.pathto(f, cwd)
493 493
494 494 def wfile(self, f, mode='r'):
495 495 return self.wopener(f, mode)
496 496
497 497 def _link(self, f):
498 498 return os.path.islink(self.wjoin(f))
499 499
500 500 def _filter(self, filter, filename, data):
501 501 if filter not in self.filterpats:
502 502 l = []
503 503 for pat, cmd in self.ui.configitems(filter):
504 504 if cmd == '!':
505 505 continue
506 506 mf = match_.match(self.root, '', [pat])
507 507 fn = None
508 508 params = cmd
509 509 for name, filterfn in self._datafilters.iteritems():
510 510 if cmd.startswith(name):
511 511 fn = filterfn
512 512 params = cmd[len(name):].lstrip()
513 513 break
514 514 if not fn:
515 515 fn = lambda s, c, **kwargs: util.filter(s, c)
516 516 # Wrap old filters not supporting keyword arguments
517 517 if not inspect.getargspec(fn)[2]:
518 518 oldfn = fn
519 519 fn = lambda s, c, **kwargs: oldfn(s, c)
520 520 l.append((mf, fn, params))
521 521 self.filterpats[filter] = l
522 522
523 523 for mf, fn, cmd in self.filterpats[filter]:
524 524 if mf(filename):
525 525 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
526 526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
527 527 break
528 528
529 529 return data
530 530
531 531 def adddatafilter(self, name, filter):
532 532 self._datafilters[name] = filter
533 533
534 534 def wread(self, filename):
535 535 if self._link(filename):
536 536 data = os.readlink(self.wjoin(filename))
537 537 else:
538 538 data = self.wopener(filename, 'r').read()
539 539 return self._filter("encode", filename, data)
540 540
541 541 def wwrite(self, filename, data, flags):
542 542 data = self._filter("decode", filename, data)
543 543 try:
544 544 os.unlink(self.wjoin(filename))
545 545 except OSError:
546 546 pass
547 547 if 'l' in flags:
548 548 self.wopener.symlink(data, filename)
549 549 else:
550 550 self.wopener(filename, 'w').write(data)
551 551 if 'x' in flags:
552 552 util.set_flags(self.wjoin(filename), False, True)
553 553
554 554 def wwritedata(self, filename, data):
555 555 return self._filter("decode", filename, data)
556 556
557 557 def transaction(self):
558 558 tr = self._transref and self._transref() or None
559 559 if tr and tr.running():
560 560 return tr.nest()
561 561
562 562 # abort here if the journal already exists
563 563 if os.path.exists(self.sjoin("journal")):
564 564 raise error.RepoError(
565 565 _("abandoned transaction found - run hg recover"))
566 566
567 567 # save dirstate for rollback
568 568 try:
569 569 ds = self.opener("dirstate").read()
570 570 except IOError:
571 571 ds = ""
572 572 self.opener("journal.dirstate", "w").write(ds)
573 573 self.opener("journal.branch", "w").write(self.dirstate.branch())
574 574
575 575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
577 577 (self.join("journal.branch"), self.join("undo.branch"))]
578 578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 579 self.sjoin("journal"),
580 580 aftertrans(renames),
581 581 self.store.createmode)
582 582 self._transref = weakref.ref(tr)
583 583 return tr
584 584
585 585 def recover(self):
586 586 lock = self.lock()
587 587 try:
588 588 if os.path.exists(self.sjoin("journal")):
589 589 self.ui.status(_("rolling back interrupted transaction\n"))
590 590 transaction.rollback(self.sopener, self.sjoin("journal"),
591 591 self.ui.warn)
592 592 self.invalidate()
593 593 return True
594 594 else:
595 595 self.ui.warn(_("no interrupted transaction available\n"))
596 596 return False
597 597 finally:
598 598 lock.release()
599 599
600 600 def rollback(self):
601 601 wlock = lock = None
602 602 try:
603 603 wlock = self.wlock()
604 604 lock = self.lock()
605 605 if os.path.exists(self.sjoin("undo")):
606 606 self.ui.status(_("rolling back last transaction\n"))
607 607 transaction.rollback(self.sopener, self.sjoin("undo"),
608 608 self.ui.warn)
609 609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
610 610 try:
611 611 branch = self.opener("undo.branch").read()
612 612 self.dirstate.setbranch(branch)
613 613 except IOError:
614 614 self.ui.warn(_("Named branch could not be reset, "
615 615 "current branch still is: %s\n")
616 616 % encoding.tolocal(self.dirstate.branch()))
617 617 self.invalidate()
618 618 self.dirstate.invalidate()
619 619 self.destroyed()
620 620 else:
621 621 self.ui.warn(_("no rollback information available\n"))
622 622 finally:
623 623 release(lock, wlock)
624 624
625 625 def invalidate(self):
626 626 for a in "changelog manifest".split():
627 627 if a in self.__dict__:
628 628 delattr(self, a)
629 629 self._tags = None
630 630 self._tagtypes = None
631 631 self.nodetagscache = None
632 632 self._branchcache = None # in UTF-8
633 633 self._branchcachetip = None
634 634
635 635 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
636 636 try:
637 637 l = lock.lock(lockname, 0, releasefn, desc=desc)
638 638 except error.LockHeld, inst:
639 639 if not wait:
640 640 raise
641 641 self.ui.warn(_("waiting for lock on %s held by %r\n") %
642 642 (desc, inst.locker))
643 643 # default to 600 seconds timeout
644 644 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
645 645 releasefn, desc=desc)
646 646 if acquirefn:
647 647 acquirefn()
648 648 return l
649 649
650 650 def lock(self, wait=True):
651 651 '''Lock the repository store (.hg/store) and return a weak reference
652 652 to the lock. Use this before modifying the store (e.g. committing or
653 653 stripping). If you are opening a transaction, get a lock as well.)'''
654 654 l = self._lockref and self._lockref()
655 655 if l is not None and l.held:
656 656 l.lock()
657 657 return l
658 658
659 659 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
660 660 _('repository %s') % self.origroot)
661 661 self._lockref = weakref.ref(l)
662 662 return l
663 663
664 664 def wlock(self, wait=True):
665 665 '''Lock the non-store parts of the repository (everything under
666 666 .hg except .hg/store) and return a weak reference to the lock.
667 667 Use this before modifying files in .hg.'''
668 668 l = self._wlockref and self._wlockref()
669 669 if l is not None and l.held:
670 670 l.lock()
671 671 return l
672 672
673 673 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
674 674 self.dirstate.invalidate, _('working directory of %s') %
675 675 self.origroot)
676 676 self._wlockref = weakref.ref(l)
677 677 return l
678 678
679 679 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
680 680 """
681 681 commit an individual file as part of a larger transaction
682 682 """
683 683
684 684 fname = fctx.path()
685 685 text = fctx.data()
686 686 flog = self.file(fname)
687 687 fparent1 = manifest1.get(fname, nullid)
688 688 fparent2 = fparent2o = manifest2.get(fname, nullid)
689 689
690 690 meta = {}
691 691 copy = fctx.renamed()
692 692 if copy and copy[0] != fname:
693 693 # Mark the new revision of this file as a copy of another
694 694 # file. This copy data will effectively act as a parent
695 695 # of this new revision. If this is a merge, the first
696 696 # parent will be the nullid (meaning "look up the copy data")
697 697 # and the second one will be the other parent. For example:
698 698 #
699 699 # 0 --- 1 --- 3 rev1 changes file foo
700 700 # \ / rev2 renames foo to bar and changes it
701 701 # \- 2 -/ rev3 should have bar with all changes and
702 702 # should record that bar descends from
703 703 # bar in rev2 and foo in rev1
704 704 #
705 705 # this allows this merge to succeed:
706 706 #
707 707 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
708 708 # \ / merging rev3 and rev4 should use bar@rev2
709 709 # \- 2 --- 4 as the merge base
710 710 #
711 711
712 712 cfname = copy[0]
713 713 crev = manifest1.get(cfname)
714 714 newfparent = fparent2
715 715
716 716 if manifest2: # branch merge
717 717 if fparent2 == nullid or crev is None: # copied on remote side
718 718 if cfname in manifest2:
719 719 crev = manifest2[cfname]
720 720 newfparent = fparent1
721 721
722 722 # find source in nearest ancestor if we've lost track
723 723 if not crev:
724 724 self.ui.debug(" %s: searching for copy revision for %s\n" %
725 725 (fname, cfname))
726 726 for ancestor in self['.'].ancestors():
727 727 if cfname in ancestor:
728 728 crev = ancestor[cfname].filenode()
729 729 break
730 730
731 731 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
732 732 meta["copy"] = cfname
733 733 meta["copyrev"] = hex(crev)
734 734 fparent1, fparent2 = nullid, newfparent
735 735 elif fparent2 != nullid:
736 736 # is one parent an ancestor of the other?
737 737 fparentancestor = flog.ancestor(fparent1, fparent2)
738 738 if fparentancestor == fparent1:
739 739 fparent1, fparent2 = fparent2, nullid
740 740 elif fparentancestor == fparent2:
741 741 fparent2 = nullid
742 742
743 743 # is the file changed?
744 744 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
745 745 changelist.append(fname)
746 746 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
747 747
748 748 # are just the flags changed during merge?
749 749 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
750 750 changelist.append(fname)
751 751
752 752 return fparent1
753 753
754 754 def commit(self, text="", user=None, date=None, match=None, force=False,
755 755 editor=False, extra={}):
756 756 """Add a new revision to current repository.
757 757
758 758 Revision information is gathered from the working directory,
759 759 match can be used to filter the committed files. If editor is
760 760 supplied, it is called to get a commit message.
761 761 """
762 762
763 763 def fail(f, msg):
764 764 raise util.Abort('%s: %s' % (f, msg))
765 765
766 766 if not match:
767 767 match = match_.always(self.root, '')
768 768
769 769 if not force:
770 770 vdirs = []
771 771 match.dir = vdirs.append
772 772 match.bad = fail
773 773
774 774 wlock = self.wlock()
775 775 try:
776 776 p1, p2 = self.dirstate.parents()
777 777 wctx = self[None]
778 778
779 779 if (not force and p2 != nullid and match and
780 780 (match.files() or match.anypats())):
781 781 raise util.Abort(_('cannot partially commit a merge '
782 782 '(do not specify files or patterns)'))
783 783
784 784 changes = self.status(match=match, clean=force)
785 785 if force:
786 786 changes[0].extend(changes[6]) # mq may commit unchanged files
787 787
788 788 # check subrepos
789 789 subs = []
790 790 for s in wctx.substate:
791 791 if match(s) and wctx.sub(s).dirty():
792 792 subs.append(s)
793 793 if subs and '.hgsubstate' not in changes[0]:
794 794 changes[0].insert(0, '.hgsubstate')
795 795
796 796 # make sure all explicit patterns are matched
797 797 if not force and match.files():
798 798 matched = set(changes[0] + changes[1] + changes[2])
799 799
800 800 for f in match.files():
801 801 if f == '.' or f in matched or f in wctx.substate:
802 802 continue
803 803 if f in changes[3]: # missing
804 804 fail(f, _('file not found!'))
805 805 if f in vdirs: # visited directory
806 806 d = f + '/'
807 807 for mf in matched:
808 808 if mf.startswith(d):
809 809 break
810 810 else:
811 811 fail(f, _("no match under directory!"))
812 812 elif f not in self.dirstate:
813 813 fail(f, _("file not tracked!"))
814 814
815 815 if (not force and not extra.get("close") and p2 == nullid
816 816 and not (changes[0] or changes[1] or changes[2])
817 817 and self[None].branch() == self['.'].branch()):
818 818 return None
819 819
820 820 ms = merge_.mergestate(self)
821 821 for f in changes[0]:
822 822 if f in ms and ms[f] == 'u':
823 823 raise util.Abort(_("unresolved merge conflicts "
824 824 "(see hg resolve)"))
825 825
826 826 cctx = context.workingctx(self, (p1, p2), text, user, date,
827 827 extra, changes)
828 828 if editor:
829 829 cctx._text = editor(self, cctx, subs)
830 830 edited = (text != cctx._text)
831 831
832 832 # commit subs
833 833 if subs:
834 834 state = wctx.substate.copy()
835 835 for s in subs:
836 836 self.ui.status(_('committing subrepository %s\n') % s)
837 837 sr = wctx.sub(s).commit(cctx._text, user, date)
838 838 state[s] = (state[s][0], sr)
839 839 subrepo.writestate(self, state)
840 840
841 841 # Save commit message in case this transaction gets rolled back
842 842 # (e.g. by a pretxncommit hook). Leave the content alone on
843 843 # the assumption that the user will use the same editor again.
844 844 msgfile = self.opener('last-message.txt', 'wb')
845 845 msgfile.write(cctx._text)
846 846 msgfile.close()
847 847
848 848 try:
849 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
850 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
849 851 ret = self.commitctx(cctx, True)
850 852 except:
851 853 if edited:
852 854 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
853 855 self.ui.write(
854 856 _('note: commit message saved in %s\n') % msgfn)
855 857 raise
856 858
857 859 # update dirstate and mergestate
858 860 for f in changes[0] + changes[1]:
859 861 self.dirstate.normal(f)
860 862 for f in changes[2]:
861 863 self.dirstate.forget(f)
862 864 self.dirstate.setparents(ret)
863 865 ms.reset()
864
865 return ret
866
867 866 finally:
868 867 wlock.release()
869 868
869 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
870 return ret
871
870 872 def commitctx(self, ctx, error=False):
871 873 """Add a new revision to current repository.
872
873 874 Revision information is passed via the context argument.
874 875 """
875 876
876 877 tr = lock = None
877 878 removed = ctx.removed()
878 879 p1, p2 = ctx.p1(), ctx.p2()
879 880 m1 = p1.manifest().copy()
880 881 m2 = p2.manifest()
881 882 user = ctx.user()
882 883
883 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
884 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
885
886 884 lock = self.lock()
887 885 try:
888 886 tr = self.transaction()
889 887 trp = weakref.proxy(tr)
890 888
891 889 # check in files
892 890 new = {}
893 891 changed = []
894 892 linkrev = len(self)
895 893 for f in sorted(ctx.modified() + ctx.added()):
896 894 self.ui.note(f + "\n")
897 895 try:
898 896 fctx = ctx[f]
899 897 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
900 898 changed)
901 899 m1.set(f, fctx.flags())
902 900 except OSError, inst:
903 901 self.ui.warn(_("trouble committing %s!\n") % f)
904 902 raise
905 903 except IOError, inst:
906 904 errcode = getattr(inst, 'errno', errno.ENOENT)
907 905 if error or errcode and errcode != errno.ENOENT:
908 906 self.ui.warn(_("trouble committing %s!\n") % f)
909 907 raise
910 908 else:
911 909 removed.append(f)
912 910
913 911 # update manifest
914 912 m1.update(new)
915 913 removed = [f for f in sorted(removed) if f in m1 or f in m2]
916 914 drop = [f for f in removed if f in m1]
917 915 for f in drop:
918 916 del m1[f]
919 917 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
920 918 p2.manifestnode(), (new, drop))
921 919
922 920 # update changelog
923 921 self.changelog.delayupdate()
924 922 n = self.changelog.add(mn, changed + removed, ctx.description(),
925 923 trp, p1.node(), p2.node(),
926 924 user, ctx.date(), ctx.extra().copy())
927 925 p = lambda: self.changelog.writepending() and self.root or ""
926 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
928 927 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
929 928 parent2=xp2, pending=p)
930 929 self.changelog.finalize(trp)
931 930 tr.close()
932 931
933 932 if self._branchcache:
934 933 self.branchtags()
935
936 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
937 934 return n
938 935 finally:
939 936 del tr
940 937 lock.release()
941 938
942 939 def destroyed(self):
943 940 '''Inform the repository that nodes have been destroyed.
944 941 Intended for use by strip and rollback, so there's a common
945 942 place for anything that has to be done after destroying history.'''
946 943 # XXX it might be nice if we could take the list of destroyed
947 944 # nodes, but I don't see an easy way for rollback() to do that
948 945
949 946 # Ensure the persistent tag cache is updated. Doing it now
950 947 # means that the tag cache only has to worry about destroyed
951 948 # heads immediately after a strip/rollback. That in turn
952 949 # guarantees that "cachetip == currenttip" (comparing both rev
953 950 # and node) always means no nodes have been added or destroyed.
954 951
955 952 # XXX this is suboptimal when qrefresh'ing: we strip the current
956 953 # head, refresh the tag cache, then immediately add a new head.
957 954 # But I think doing it this way is necessary for the "instant
958 955 # tag cache retrieval" case to work.
959 956 tags_.findglobaltags(self.ui, self, {}, {})
960 957
961 958 def walk(self, match, node=None):
962 959 '''
963 960 walk recursively through the directory tree or a given
964 961 changeset, finding all files matched by the match
965 962 function
966 963 '''
967 964 return self[node].walk(match)
968 965
969 966 def status(self, node1='.', node2=None, match=None,
970 967 ignored=False, clean=False, unknown=False):
971 968 """return status of files between two nodes or node and working directory
972 969
973 970 If node1 is None, use the first dirstate parent instead.
974 971 If node2 is None, compare node1 with working directory.
975 972 """
976 973
977 974 def mfmatches(ctx):
978 975 mf = ctx.manifest().copy()
979 976 for fn in mf.keys():
980 977 if not match(fn):
981 978 del mf[fn]
982 979 return mf
983 980
984 981 if isinstance(node1, context.changectx):
985 982 ctx1 = node1
986 983 else:
987 984 ctx1 = self[node1]
988 985 if isinstance(node2, context.changectx):
989 986 ctx2 = node2
990 987 else:
991 988 ctx2 = self[node2]
992 989
993 990 working = ctx2.rev() is None
994 991 parentworking = working and ctx1 == self['.']
995 992 match = match or match_.always(self.root, self.getcwd())
996 993 listignored, listclean, listunknown = ignored, clean, unknown
997 994
998 995 # load earliest manifest first for caching reasons
999 996 if not working and ctx2.rev() < ctx1.rev():
1000 997 ctx2.manifest()
1001 998
1002 999 if not parentworking:
1003 1000 def bad(f, msg):
1004 1001 if f not in ctx1:
1005 1002 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1006 1003 match.bad = bad
1007 1004
1008 1005 if working: # we need to scan the working dir
1009 1006 subrepos = ctx1.substate.keys()
1010 1007 s = self.dirstate.status(match, subrepos, listignored,
1011 1008 listclean, listunknown)
1012 1009 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1013 1010
1014 1011 # check for any possibly clean files
1015 1012 if parentworking and cmp:
1016 1013 fixup = []
1017 1014 # do a full compare of any files that might have changed
1018 1015 for f in sorted(cmp):
1019 1016 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1020 1017 or ctx1[f].cmp(ctx2[f].data())):
1021 1018 modified.append(f)
1022 1019 else:
1023 1020 fixup.append(f)
1024 1021
1025 1022 if listclean:
1026 1023 clean += fixup
1027 1024
1028 1025 # update dirstate for files that are actually clean
1029 1026 if fixup:
1030 1027 try:
1031 1028 # updating the dirstate is optional
1032 1029 # so we don't wait on the lock
1033 1030 wlock = self.wlock(False)
1034 1031 try:
1035 1032 for f in fixup:
1036 1033 self.dirstate.normal(f)
1037 1034 finally:
1038 1035 wlock.release()
1039 1036 except error.LockError:
1040 1037 pass
1041 1038
1042 1039 if not parentworking:
1043 1040 mf1 = mfmatches(ctx1)
1044 1041 if working:
1045 1042 # we are comparing working dir against non-parent
1046 1043 # generate a pseudo-manifest for the working dir
1047 1044 mf2 = mfmatches(self['.'])
1048 1045 for f in cmp + modified + added:
1049 1046 mf2[f] = None
1050 1047 mf2.set(f, ctx2.flags(f))
1051 1048 for f in removed:
1052 1049 if f in mf2:
1053 1050 del mf2[f]
1054 1051 else:
1055 1052 # we are comparing two revisions
1056 1053 deleted, unknown, ignored = [], [], []
1057 1054 mf2 = mfmatches(ctx2)
1058 1055
1059 1056 modified, added, clean = [], [], []
1060 1057 for fn in mf2:
1061 1058 if fn in mf1:
1062 1059 if (mf1.flags(fn) != mf2.flags(fn) or
1063 1060 (mf1[fn] != mf2[fn] and
1064 1061 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1065 1062 modified.append(fn)
1066 1063 elif listclean:
1067 1064 clean.append(fn)
1068 1065 del mf1[fn]
1069 1066 else:
1070 1067 added.append(fn)
1071 1068 removed = mf1.keys()
1072 1069
1073 1070 r = modified, added, removed, deleted, unknown, ignored, clean
1074 1071 [l.sort() for l in r]
1075 1072 return r
1076 1073
1077 1074 def add(self, list):
1078 1075 wlock = self.wlock()
1079 1076 try:
1080 1077 rejected = []
1081 1078 for f in list:
1082 1079 p = self.wjoin(f)
1083 1080 try:
1084 1081 st = os.lstat(p)
1085 1082 except:
1086 1083 self.ui.warn(_("%s does not exist!\n") % f)
1087 1084 rejected.append(f)
1088 1085 continue
1089 1086 if st.st_size > 10000000:
1090 1087 self.ui.warn(_("%s: files over 10MB may cause memory and"
1091 1088 " performance problems\n"
1092 1089 "(use 'hg revert %s' to unadd the file)\n")
1093 1090 % (f, f))
1094 1091 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1095 1092 self.ui.warn(_("%s not added: only files and symlinks "
1096 1093 "supported currently\n") % f)
1097 1094 rejected.append(p)
1098 1095 elif self.dirstate[f] in 'amn':
1099 1096 self.ui.warn(_("%s already tracked!\n") % f)
1100 1097 elif self.dirstate[f] == 'r':
1101 1098 self.dirstate.normallookup(f)
1102 1099 else:
1103 1100 self.dirstate.add(f)
1104 1101 return rejected
1105 1102 finally:
1106 1103 wlock.release()
1107 1104
1108 1105 def forget(self, list):
1109 1106 wlock = self.wlock()
1110 1107 try:
1111 1108 for f in list:
1112 1109 if self.dirstate[f] != 'a':
1113 1110 self.ui.warn(_("%s not added!\n") % f)
1114 1111 else:
1115 1112 self.dirstate.forget(f)
1116 1113 finally:
1117 1114 wlock.release()
1118 1115
1119 1116 def remove(self, list, unlink=False):
1120 1117 if unlink:
1121 1118 for f in list:
1122 1119 try:
1123 1120 util.unlink(self.wjoin(f))
1124 1121 except OSError, inst:
1125 1122 if inst.errno != errno.ENOENT:
1126 1123 raise
1127 1124 wlock = self.wlock()
1128 1125 try:
1129 1126 for f in list:
1130 1127 if unlink and os.path.exists(self.wjoin(f)):
1131 1128 self.ui.warn(_("%s still exists!\n") % f)
1132 1129 elif self.dirstate[f] == 'a':
1133 1130 self.dirstate.forget(f)
1134 1131 elif f not in self.dirstate:
1135 1132 self.ui.warn(_("%s not tracked!\n") % f)
1136 1133 else:
1137 1134 self.dirstate.remove(f)
1138 1135 finally:
1139 1136 wlock.release()
1140 1137
1141 1138 def undelete(self, list):
1142 1139 manifests = [self.manifest.read(self.changelog.read(p)[0])
1143 1140 for p in self.dirstate.parents() if p != nullid]
1144 1141 wlock = self.wlock()
1145 1142 try:
1146 1143 for f in list:
1147 1144 if self.dirstate[f] != 'r':
1148 1145 self.ui.warn(_("%s not removed!\n") % f)
1149 1146 else:
1150 1147 m = f in manifests[0] and manifests[0] or manifests[1]
1151 1148 t = self.file(f).read(m[f])
1152 1149 self.wwrite(f, t, m.flags(f))
1153 1150 self.dirstate.normal(f)
1154 1151 finally:
1155 1152 wlock.release()
1156 1153
1157 1154 def copy(self, source, dest):
1158 1155 p = self.wjoin(dest)
1159 1156 if not (os.path.exists(p) or os.path.islink(p)):
1160 1157 self.ui.warn(_("%s does not exist!\n") % dest)
1161 1158 elif not (os.path.isfile(p) or os.path.islink(p)):
1162 1159 self.ui.warn(_("copy failed: %s is not a file or a "
1163 1160 "symbolic link\n") % dest)
1164 1161 else:
1165 1162 wlock = self.wlock()
1166 1163 try:
1167 1164 if self.dirstate[dest] in '?r':
1168 1165 self.dirstate.add(dest)
1169 1166 self.dirstate.copy(source, dest)
1170 1167 finally:
1171 1168 wlock.release()
1172 1169
1173 1170 def heads(self, start=None):
1174 1171 heads = self.changelog.heads(start)
1175 1172 # sort the output in rev descending order
1176 1173 heads = [(-self.changelog.rev(h), h) for h in heads]
1177 1174 return [n for (r, n) in sorted(heads)]
1178 1175
1179 1176 def branchheads(self, branch=None, start=None, closed=False):
1180 1177 '''return a (possibly filtered) list of heads for the given branch
1181 1178
1182 1179 Heads are returned in topological order, from newest to oldest.
1183 1180 If branch is None, use the dirstate branch.
1184 1181 If start is not None, return only heads reachable from start.
1185 1182 If closed is True, return heads that are marked as closed as well.
1186 1183 '''
1187 1184 if branch is None:
1188 1185 branch = self[None].branch()
1189 1186 branches = self.branchmap()
1190 1187 if branch not in branches:
1191 1188 return []
1192 1189 # the cache returns heads ordered lowest to highest
1193 1190 bheads = list(reversed(branches[branch]))
1194 1191 if start is not None:
1195 1192 # filter out the heads that cannot be reached from startrev
1196 1193 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1197 1194 bheads = [h for h in bheads if h in fbheads]
1198 1195 if not closed:
1199 1196 bheads = [h for h in bheads if
1200 1197 ('close' not in self.changelog.read(h)[5])]
1201 1198 return bheads
1202 1199
1203 1200 def branches(self, nodes):
1204 1201 if not nodes:
1205 1202 nodes = [self.changelog.tip()]
1206 1203 b = []
1207 1204 for n in nodes:
1208 1205 t = n
1209 1206 while 1:
1210 1207 p = self.changelog.parents(n)
1211 1208 if p[1] != nullid or p[0] == nullid:
1212 1209 b.append((t, n, p[0], p[1]))
1213 1210 break
1214 1211 n = p[0]
1215 1212 return b
1216 1213
1217 1214 def between(self, pairs):
1218 1215 r = []
1219 1216
1220 1217 for top, bottom in pairs:
1221 1218 n, l, i = top, [], 0
1222 1219 f = 1
1223 1220
1224 1221 while n != bottom and n != nullid:
1225 1222 p = self.changelog.parents(n)[0]
1226 1223 if i == f:
1227 1224 l.append(n)
1228 1225 f = f * 2
1229 1226 n = p
1230 1227 i += 1
1231 1228
1232 1229 r.append(l)
1233 1230
1234 1231 return r
1235 1232
1236 1233 def findincoming(self, remote, base=None, heads=None, force=False):
1237 1234 """Return list of roots of the subsets of missing nodes from remote
1238 1235
1239 1236 If base dict is specified, assume that these nodes and their parents
1240 1237 exist on the remote side and that no child of a node of base exists
1241 1238 in both remote and self.
1242 1239 Furthermore base will be updated to include the nodes that exists
1243 1240 in self and remote but no children exists in self and remote.
1244 1241 If a list of heads is specified, return only nodes which are heads
1245 1242 or ancestors of these heads.
1246 1243
1247 1244 All the ancestors of base are in self and in remote.
1248 1245 All the descendants of the list returned are missing in self.
1249 1246 (and so we know that the rest of the nodes are missing in remote, see
1250 1247 outgoing)
1251 1248 """
1252 1249 return self.findcommonincoming(remote, base, heads, force)[1]
1253 1250
    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        # m is the local nodemap: "node in m" tests whether we know a node
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        # empty local repo: everything the remote has is missing
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        # partition remote heads into known (-> base) and unknown
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            # every remote head is already known locally: nothing to fetch
            return base.keys(), [], []

        # req tracks nodes already requested from the remote, to avoid
        # asking for the same node twice; reqcnt counts round trips
        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown, not-yet-requested parents for the
                    # next round trip
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.progress('searching', reqcnt, unit='queries')
                self.ui.debug("request %d: %s\n" %
                            (reqcnt, " ".join(map(short, r))))
                # batch requests 10 at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            self.ui.progress('searching', reqcnt, unit='queries')
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug("found new branch changeset %s\n" %
                                              short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
                search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                # NOTE(review): short() is elsewhere given the full binary
                # node; f[:4] truncates to 4 bytes (8 hex digits) before
                # hexing — possibly meant to be short(f). Confirm upstream.
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.progress('searching', None, unit='queries')
        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads
1398 1395
1399 1396 def findoutgoing(self, remote, base=None, heads=None, force=False):
1400 1397 """Return list of nodes that are roots of subsets not in remote
1401 1398
1402 1399 If base dict is specified, assume that these nodes and their parents
1403 1400 exist on the remote side.
1404 1401 If a list of heads is specified, return only nodes which are heads
1405 1402 or ancestors of these heads, and return a second element which
1406 1403 contains all remote heads which get new children.
1407 1404 """
1408 1405 if base is None:
1409 1406 base = {}
1410 1407 self.findincoming(remote, base, heads, force=force)
1411 1408
1412 1409 self.ui.debug("common changesets up to "
1413 1410 + " ".join(map(short, base.keys())) + "\n")
1414 1411
1415 1412 remain = set(self.changelog.nodemap)
1416 1413
1417 1414 # prune everything remote has from the tree
1418 1415 remain.remove(nullid)
1419 1416 remove = base.keys()
1420 1417 while remove:
1421 1418 n = remove.pop(0)
1422 1419 if n in remain:
1423 1420 remain.remove(n)
1424 1421 for p in self.changelog.parents(n):
1425 1422 remove.append(p)
1426 1423
1427 1424 # find every node whose parents have been pruned
1428 1425 subset = []
1429 1426 # find every remote head that will get new children
1430 1427 updated_heads = set()
1431 1428 for n in remain:
1432 1429 p1, p2 = self.changelog.parents(n)
1433 1430 if p1 not in remain and p2 not in remain:
1434 1431 subset.append(n)
1435 1432 if heads:
1436 1433 if p1 in heads:
1437 1434 updated_heads.add(p1)
1438 1435 if p2 in heads:
1439 1436 updated_heads.add(p2)
1440 1437
1441 1438 # this is the set of all roots we have to push
1442 1439 if heads:
1443 1440 return subset, list(updated_heads)
1444 1441 else:
1445 1442 return subset
1446 1443
1447 1444 def pull(self, remote, heads=None, force=False):
1448 1445 lock = self.lock()
1449 1446 try:
1450 1447 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1451 1448 force=force)
1452 1449 if fetch == [nullid]:
1453 1450 self.ui.status(_("requesting all changes\n"))
1454 1451
1455 1452 if not fetch:
1456 1453 self.ui.status(_("no changes found\n"))
1457 1454 return 0
1458 1455
1459 1456 if heads is None and remote.capable('changegroupsubset'):
1460 1457 heads = rheads
1461 1458
1462 1459 if heads is None:
1463 1460 cg = remote.changegroup(fetch, 'pull')
1464 1461 else:
1465 1462 if not remote.capable('changegroupsubset'):
1466 1463 raise util.Abort(_("Partial pull cannot be done because "
1467 1464 "other repository doesn't support "
1468 1465 "changegroupsubset."))
1469 1466 cg = remote.changegroupsubset(fetch, heads, 'pull')
1470 1467 return self.addchangegroup(cg, 'pull', remote.url())
1471 1468 finally:
1472 1469 lock.release()
1473 1470
1474 1471 def push(self, remote, force=False, revs=None):
1475 1472 # there are two ways to push to remote repo:
1476 1473 #
1477 1474 # addchangegroup assumes local user can lock remote
1478 1475 # repo (local filesystem, old ssh servers).
1479 1476 #
1480 1477 # unbundle assumes local user cannot lock remote repo (new ssh
1481 1478 # servers, http servers).
1482 1479
1483 1480 if remote.capable('unbundle'):
1484 1481 return self.push_unbundle(remote, force, revs)
1485 1482 return self.push_addchangegroup(remote, force, revs)
1486 1483
    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.

        On refusal/no-op the first element is None and the second is a
        status code: 0 for "push would create new heads/branches",
        1 for "no changes found".
        '''
        # common is filled in by findincoming with nodes known on both sides
        common = {}
        remote_heads = remote.heads()
        # inc is non-empty when the remote has changes we do not
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb, branchname=None):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if branchname is not None:
                    msg = _("abort: push creates new remote heads"
                            " on branch '%s'!\n") % branchname
                else:
                    msg = _("abort: push creates new remote heads!\n")
                self.ui.warn(msg)
                if len(lheads) > len(rheads):
                    self.ui.status(_("(did you forget to merge?"
                                     " use push -f to force)\n"))
                else:
                    self.ui.status(_("(you should pull and merge or"
                                     " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    remotebrheads = remote.branchmap()

                    if not revs:
                        localbrheads = self.branchmap()
                    else:
                        # restrict the check to the branches actually
                        # touched by the pushed revisions
                        localbrheads = {}
                        for n in heads:
                            branch = self[n].branch()
                            localbrheads.setdefault(branch, []).append(n)

                    newbranches = list(set(localbrheads) - set(remotebrheads))
                    if newbranches: # new branch requires --force
                        branchnames = ', '.join("%s" % b for b in newbranches)
                        self.ui.warn(_("abort: push creates "
                                       "new remote branches: %s!\n")
                                     % branchnames)
                        # propose 'push -b .' in the msg too?
                        self.ui.status(_("(use 'hg push -f' to force)\n"))
                        return None, 0
                    for branch, lheads in localbrheads.iteritems():
                        if branch in remotebrheads:
                            rheads = remotebrheads[branch]
                            if not checkbranch(lheads, rheads, update, branch):
                                return None, 0
                else:
                    # old remote without branchmap: single global head check
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1606 1603
1607 1604 def push_addchangegroup(self, remote, force, revs):
1608 1605 lock = remote.lock()
1609 1606 try:
1610 1607 ret = self.prepush(remote, force, revs)
1611 1608 if ret[0] is not None:
1612 1609 cg, remote_heads = ret
1613 1610 return remote.addchangegroup(cg, 'push', self.url())
1614 1611 return ret[1]
1615 1612 finally:
1616 1613 lock.release()
1617 1614
1618 1615 def push_unbundle(self, remote, force, revs):
1619 1616 # local repo finds heads on server, finds out what revs it
1620 1617 # must push. once revs transferred, if server finds it has
1621 1618 # different heads (someone else won commit/push race), server
1622 1619 # aborts.
1623 1620
1624 1621 ret = self.prepush(remote, force, revs)
1625 1622 if ret[0] is not None:
1626 1623 cg, remote_heads = ret
1627 1624 if force:
1628 1625 remote_heads = ['force']
1629 1626 return remote.unbundle(cg, remote_heads, 'push')
1630 1627 return ret[1]
1631 1628
1632 1629 def changegroupinfo(self, nodes, source):
1633 1630 if self.ui.verbose or source == 'bundle':
1634 1631 self.ui.status(_("%d changesets found\n") % len(nodes))
1635 1632 if self.ui.debugflag:
1636 1633 self.ui.debug("list of changesets:\n")
1637 1634 for node in nodes:
1638 1635 self.ui.debug("%s\n" % hex(node))
1639 1636
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendents of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                msngset.pop(revlog.node(r), None)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress('bundle changes', cnt, unit='chunks')
                cnt += 1
            self.ui.progress('bundle changes', None, unit='chunks')


            # Figure out which manifest nodes (of the ones we think might be
            # part of the changegroup) the recipient must know about and
            # remove them from the changegroup.
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress('bundle manifests', cnt, unit='chunks')
                cnt += 1
            self.ui.progress('bundle manifests', None, unit='chunks')

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        self.ui.progress(
                            'bundle files', cnt, item=fname, unit='chunks')
                        cnt += 1
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress('bundle files', None, unit='chunks')

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1910 1907
1911 1908 def changegroup(self, basenodes, source):
1912 1909 # to avoid a race we use changegroupsubset() (issue1320)
1913 1910 return self.changegroupsubset(basenodes, self.heads(), source)
1914 1911
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # revisions of the outgoing changesets; used below to decide which
        # manifest/file revisions belong in the bundle
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # each changeset is its own linkrev target
        def identity(x):
            return x

        # yield the nodes of log whose linkrev points at an outgoing changeset
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # build a per-revlog lookup mapping a node to the changelog node
        # that should be transmitted as its linkrev
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = {}
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            cnt = 0
            for chnk in cl.group(nodes, identity, collect):
                self.ui.progress('bundle changes', cnt, unit='chunks')
                cnt += 1
                yield chnk
            self.ui.progress('bundle changes', None, unit='chunks')

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            cnt = 0
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                self.ui.progress('bundle manifests', cnt, unit='chunks')
                cnt += 1
                yield chnk
            self.ui.progress('bundle manifests', None, unit='chunks')

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # filename header precedes the file's chunk group
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            'bundle files', cnt, item=fname, unit='chunks')
                        cnt += 1
                        yield chnk
            self.ui.progress('bundle files', None, unit='chunks')

            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1991 1988
1992 1989 def addchangegroup(self, source, srctype, url, emptyok=False):
1993 1990 """add changegroup to repo.
1994 1991
1995 1992 return values:
1996 1993 - nothing changed or no source: 0
1997 1994 - more heads than before: 1+added heads (2..n)
1998 1995 - less heads than before: -1-removed heads (-2..-n)
1999 1996 - number of heads stays the same: 1
2000 1997 """
2001 1998 def csmap(x):
2002 1999 self.ui.debug("add changeset %s\n" % short(x))
2003 2000 return len(cl)
2004 2001
2005 2002 def revmap(x):
2006 2003 return cl.rev(x)
2007 2004
2008 2005 if not source:
2009 2006 return 0
2010 2007
2011 2008 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2012 2009
2013 2010 changesets = files = revisions = 0
2014 2011
2015 2012 # write changelog data to temp files so concurrent readers will not see
2016 2013 # inconsistent view
2017 2014 cl = self.changelog
2018 2015 cl.delayupdate()
2019 2016 oldheads = len(cl.heads())
2020 2017
2021 2018 tr = self.transaction()
2022 2019 try:
2023 2020 trp = weakref.proxy(tr)
2024 2021 # pull off the changeset group
2025 2022 self.ui.status(_("adding changesets\n"))
2026 2023 clstart = len(cl)
2027 2024 class prog(object):
2028 2025 step = 'changesets'
2029 2026 count = 1
2030 2027 ui = self.ui
2031 2028 def __call__(self):
2032 2029 self.ui.progress(self.step, self.count, unit='chunks')
2033 2030 self.count += 1
2034 2031 pr = prog()
2035 2032 chunkiter = changegroup.chunkiter(source, progress=pr)
2036 2033 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2037 2034 raise util.Abort(_("received changelog group is empty"))
2038 2035 clend = len(cl)
2039 2036 changesets = clend - clstart
2040 2037 self.ui.progress('changesets', None)
2041 2038
2042 2039 # pull off the manifest group
2043 2040 self.ui.status(_("adding manifests\n"))
2044 2041 pr.step = 'manifests'
2045 2042 pr.count = 1
2046 2043 chunkiter = changegroup.chunkiter(source, progress=pr)
2047 2044 # no need to check for empty manifest group here:
2048 2045 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2049 2046 # no new manifest will be created and the manifest group will
2050 2047 # be empty during the pull
2051 2048 self.manifest.addgroup(chunkiter, revmap, trp)
2052 2049 self.ui.progress('manifests', None)
2053 2050
2054 2051 needfiles = {}
2055 2052 if self.ui.configbool('server', 'validate', default=False):
2056 2053 # validate incoming csets have their manifests
2057 2054 for cset in xrange(clstart, clend):
2058 2055 mfest = self.changelog.read(self.changelog.node(cset))[0]
2059 2056 mfest = self.manifest.readdelta(mfest)
2060 2057 # store file nodes we must see
2061 2058 for f, n in mfest.iteritems():
2062 2059 needfiles.setdefault(f, set()).add(n)
2063 2060
2064 2061 # process the files
2065 2062 self.ui.status(_("adding file changes\n"))
2066 2063 pr.step = 'files'
2067 2064 pr.count = 1
2068 2065 while 1:
2069 2066 f = changegroup.getchunk(source)
2070 2067 if not f:
2071 2068 break
2072 2069 self.ui.debug("adding %s revisions\n" % f)
2073 2070 fl = self.file(f)
2074 2071 o = len(fl)
2075 2072 chunkiter = changegroup.chunkiter(source, progress=pr)
2076 2073 if fl.addgroup(chunkiter, revmap, trp) is None:
2077 2074 raise util.Abort(_("received file revlog group is empty"))
2078 2075 revisions += len(fl) - o
2079 2076 files += 1
2080 2077 if f in needfiles:
2081 2078 needs = needfiles[f]
2082 2079 for new in xrange(o, len(fl)):
2083 2080 n = fl.node(new)
2084 2081 if n in needs:
2085 2082 needs.remove(n)
2086 2083 if not needs:
2087 2084 del needfiles[f]
2088 2085 self.ui.progress('files', None)
2089 2086
2090 2087 for f, needs in needfiles.iteritems():
2091 2088 fl = self.file(f)
2092 2089 for n in needs:
2093 2090 try:
2094 2091 fl.rev(n)
2095 2092 except error.LookupError:
2096 2093 raise util.Abort(
2097 2094 _('missing file data for %s:%s - run hg verify') %
2098 2095 (f, hex(n)))
2099 2096
2100 2097 newheads = len(cl.heads())
2101 2098 heads = ""
2102 2099 if oldheads and newheads != oldheads:
2103 2100 heads = _(" (%+d heads)") % (newheads - oldheads)
2104 2101
2105 2102 self.ui.status(_("added %d changesets"
2106 2103 " with %d changes to %d files%s\n")
2107 2104 % (changesets, revisions, files, heads))
2108 2105
2109 2106 if changesets > 0:
2110 2107 p = lambda: cl.writepending() and self.root or ""
2111 2108 self.hook('pretxnchangegroup', throw=True,
2112 2109 node=hex(cl.node(clstart)), source=srctype,
2113 2110 url=url, pending=p)
2114 2111
2115 2112 # make changelog see real files again
2116 2113 cl.finalize(trp)
2117 2114
2118 2115 tr.close()
2119 2116 finally:
2120 2117 del tr
2121 2118
2122 2119 if changesets > 0:
2123 2120 # forcefully update the on-disk branch cache
2124 2121 self.ui.debug("updating the branch cache\n")
2125 2122 self.branchtags()
2126 2123 self.hook("changegroup", node=hex(cl.node(clstart)),
2127 2124 source=srctype, url=url)
2128 2125
2129 2126 for i in xrange(clstart, clend):
2130 2127 self.hook("incoming", node=hex(cl.node(i)),
2131 2128 source=srctype, url=url)
2132 2129
2133 2130 # never return 0 here:
2134 2131 if newheads < oldheads:
2135 2132 return newheads - oldheads - 1
2136 2133 else:
2137 2134 return newheads - oldheads + 1
2138 2135
2139 2136
2140 2137 def stream_in(self, remote):
2141 2138 fp = remote.stream_out()
2142 2139 l = fp.readline()
2143 2140 try:
2144 2141 resp = int(l)
2145 2142 except ValueError:
2146 2143 raise error.ResponseError(
2147 2144 _('Unexpected response from remote server:'), l)
2148 2145 if resp == 1:
2149 2146 raise util.Abort(_('operation forbidden by server'))
2150 2147 elif resp == 2:
2151 2148 raise util.Abort(_('locking the remote repository failed'))
2152 2149 elif resp != 0:
2153 2150 raise util.Abort(_('the server sent an unknown error code'))
2154 2151 self.ui.status(_('streaming all changes\n'))
2155 2152 l = fp.readline()
2156 2153 try:
2157 2154 total_files, total_bytes = map(int, l.split(' ', 1))
2158 2155 except (ValueError, TypeError):
2159 2156 raise error.ResponseError(
2160 2157 _('Unexpected response from remote server:'), l)
2161 2158 self.ui.status(_('%d files to transfer, %s of data\n') %
2162 2159 (total_files, util.bytecount(total_bytes)))
2163 2160 start = time.time()
2164 2161 for i in xrange(total_files):
2165 2162 # XXX doesn't support '\n' or '\r' in filenames
2166 2163 l = fp.readline()
2167 2164 try:
2168 2165 name, size = l.split('\0', 1)
2169 2166 size = int(size)
2170 2167 except (ValueError, TypeError):
2171 2168 raise error.ResponseError(
2172 2169 _('Unexpected response from remote server:'), l)
2173 2170 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2174 2171 # for backwards compat, name was partially encoded
2175 2172 ofp = self.sopener(store.decodedir(name), 'w')
2176 2173 for chunk in util.filechunkiter(fp, limit=size):
2177 2174 ofp.write(chunk)
2178 2175 ofp.close()
2179 2176 elapsed = time.time() - start
2180 2177 if elapsed <= 0:
2181 2178 elapsed = 0.001
2182 2179 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2183 2180 (util.bytecount(total_bytes), elapsed,
2184 2181 util.bytecount(total_bytes / elapsed)))
2185 2182 self.invalidate()
2186 2183 return len(self.heads()) + 1
2187 2184
2188 2185 def clone(self, remote, heads=[], stream=False):
2189 2186 '''clone remote repository.
2190 2187
2191 2188 keyword arguments:
2192 2189 heads: list of revs to clone (forces use of pull)
2193 2190 stream: use streaming clone if possible'''
2194 2191
2195 2192 # now, all clients that can request uncompressed clones can
2196 2193 # read repo formats supported by all servers that can serve
2197 2194 # them.
2198 2195
2199 2196 # if revlog format changes, client will have to check version
2200 2197 # and format flags on "stream" capability, and use
2201 2198 # uncompressed only if compatible.
2202 2199
2203 2200 if stream and not heads and remote.capable('stream'):
2204 2201 return self.stream_in(remote)
2205 2202 return self.pull(remote, heads)
2206 2203
2207 2204 # used to avoid circular references so destructors work
2208 2205 def aftertrans(files):
2209 2206 renamefiles = [tuple(t) for t in files]
2210 2207 def a():
2211 2208 for src, dest in renamefiles:
2212 2209 util.rename(src, dest)
2213 2210 return a
2214 2211
2215 2212 def instance(ui, path, create):
2216 2213 return localrepository(ui, util.drop_scheme('file', path), create)
2217 2214
2218 2215 def islocal(path):
2219 2216 return True
@@ -1,518 +1,519 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, hex, bin
9 9 from i18n import _
10 10 import util, filemerge, copies, subrepo
11 11 import errno, os, shutil
12 12
13 13 class mergestate(object):
14 14 '''track 3-way merge state of individual files'''
15 15 def __init__(self, repo):
16 16 self._repo = repo
17 17 self._read()
18 18 def reset(self, node=None):
19 19 self._state = {}
20 20 if node:
21 21 self._local = node
22 22 shutil.rmtree(self._repo.join("merge"), True)
23 23 def _read(self):
24 24 self._state = {}
25 25 try:
26 26 localnode = None
27 27 f = self._repo.opener("merge/state")
28 28 for i, l in enumerate(f):
29 29 if i == 0:
30 30 localnode = l[:-1]
31 31 else:
32 32 bits = l[:-1].split("\0")
33 33 self._state[bits[0]] = bits[1:]
34 34 self._local = bin(localnode)
35 35 except IOError, err:
36 36 if err.errno != errno.ENOENT:
37 37 raise
38 38 def _write(self):
39 39 f = self._repo.opener("merge/state", "w")
40 40 f.write(hex(self._local) + "\n")
41 41 for d, v in self._state.iteritems():
42 42 f.write("\0".join([d] + v) + "\n")
43 43 def add(self, fcl, fco, fca, fd, flags):
44 44 hash = util.sha1(fcl.path()).hexdigest()
45 45 self._repo.opener("merge/" + hash, "w").write(fcl.data())
46 46 self._state[fd] = ['u', hash, fcl.path(), fca.path(),
47 47 hex(fca.filenode()), fco.path(), flags]
48 48 self._write()
49 49 def __contains__(self, dfile):
50 50 return dfile in self._state
51 51 def __getitem__(self, dfile):
52 52 return self._state[dfile][0]
53 53 def __iter__(self):
54 54 l = self._state.keys()
55 55 l.sort()
56 56 for f in l:
57 57 yield f
58 58 def mark(self, dfile, state):
59 59 self._state[dfile][0] = state
60 60 self._write()
61 61 def resolve(self, dfile, wctx, octx):
62 62 if self[dfile] == 'r':
63 63 return 0
64 64 state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
65 65 f = self._repo.opener("merge/" + hash)
66 66 self._repo.wwrite(dfile, f.read(), flags)
67 67 fcd = wctx[dfile]
68 68 fco = octx[ofile]
69 69 fca = self._repo.filectx(afile, fileid=anode)
70 70 r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
71 71 if not r:
72 72 self.mark(dfile, 'r')
73 73 return r
74 74
75 75 def _checkunknown(wctx, mctx):
76 76 "check for collisions between unknown files and files in mctx"
77 77 for f in wctx.unknown():
78 78 if f in mctx and mctx[f].cmp(wctx[f].data()):
79 79 raise util.Abort(_("untracked file in working directory differs"
80 80 " from file in requested revision: '%s'") % f)
81 81
82 82 def _checkcollision(mctx):
83 83 "check for case folding collisions in the destination context"
84 84 folded = {}
85 85 for fn in mctx:
86 86 fold = fn.lower()
87 87 if fold in folded:
88 88 raise util.Abort(_("case-folding collision between %s and %s")
89 89 % (fn, folded[fold]))
90 90 folded[fold] = fn
91 91
92 92 def _forgetremoved(wctx, mctx, branchmerge):
93 93 """
94 94 Forget removed files
95 95
96 96 If we're jumping between revisions (as opposed to merging), and if
97 97 neither the working directory nor the target rev has the file,
98 98 then we need to remove it from the dirstate, to prevent the
99 99 dirstate from listing the file when it is no longer in the
100 100 manifest.
101 101
102 102 If we're merging, and the other revision has removed a file
103 103 that is not present in the working directory, we need to mark it
104 104 as removed.
105 105 """
106 106
107 107 action = []
108 108 state = branchmerge and 'r' or 'f'
109 109 for f in wctx.deleted():
110 110 if f not in mctx:
111 111 action.append((f, state))
112 112
113 113 if not branchmerge:
114 114 for f in wctx.removed():
115 115 if f not in mctx:
116 116 action.append((f, "f"))
117 117
118 118 return action
119 119
120 120 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
121 121 """
122 122 Merge p1 and p2 with ancestor ma and generate merge action list
123 123
124 124 overwrite = whether we clobber working files
125 125 partial = function to filter file lists
126 126 """
127 127
128 128 def fmerge(f, f2, fa):
129 129 """merge flags"""
130 130 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
131 131 if m == n: # flags agree
132 132 return m # unchanged
133 133 if m and n and not a: # flags set, don't agree, differ from parent
134 134 r = repo.ui.promptchoice(
135 135 _(" conflicting flags for %s\n"
136 136 "(n)one, e(x)ec or sym(l)ink?") % f,
137 137 (_("&None"), _("E&xec"), _("Sym&link")), 0)
138 138 if r == 1:
139 139 return "x" # Exec
140 140 if r == 2:
141 141 return "l" # Symlink
142 142 return ""
143 143 if m and m != a: # changed from a to m
144 144 return m
145 145 if n and n != a: # changed from a to n
146 146 return n
147 147 return '' # flag was cleared
148 148
149 149 def act(msg, m, f, *args):
150 150 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
151 151 action.append((f, m) + args)
152 152
153 153 action, copy = [], {}
154 154
155 155 if overwrite:
156 156 pa = p1
157 157 elif pa == p2: # backwards
158 158 pa = p1.p1()
159 159 elif pa and repo.ui.configbool("merge", "followcopies", True):
160 160 dirs = repo.ui.configbool("merge", "followdirs", True)
161 161 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
162 162 for of, fl in diverge.iteritems():
163 163 act("divergent renames", "dr", of, fl)
164 164
165 165 repo.ui.note(_("resolving manifests\n"))
166 166 repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
167 167 repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))
168 168
169 169 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
170 170 copied = set(copy.values())
171 171
172 172 if not overwrite and '.hgsubstate' in m1:
173 173 # check whether sub state is modified
174 174 for s in p1.substate:
175 175 if p1.sub(s).dirty():
176 176 m1['.hgsubstate'] += "+"
177 177 break
178 178
179 179 # Compare manifests
180 180 for f, n in m1.iteritems():
181 181 if partial and not partial(f):
182 182 continue
183 183 if f in m2:
184 184 rflags = fmerge(f, f, f)
185 185 a = ma.get(f, nullid)
186 186 if n == m2[f] or m2[f] == a: # same or local newer
187 187 if m1.flags(f) != rflags:
188 188 act("update permissions", "e", f, rflags)
189 189 elif n == a: # remote newer
190 190 act("remote is newer", "g", f, rflags)
191 191 else: # both changed
192 192 act("versions differ", "m", f, f, f, rflags, False)
193 193 elif f in copied: # files we'll deal with on m2 side
194 194 pass
195 195 elif f in copy:
196 196 f2 = copy[f]
197 197 if f2 not in m2: # directory rename
198 198 act("remote renamed directory to " + f2, "d",
199 199 f, None, f2, m1.flags(f))
200 200 else: # case 2 A,B/B/B or case 4,21 A/B/B
201 201 act("local copied/moved to " + f2, "m",
202 202 f, f2, f, fmerge(f, f2, f2), False)
203 203 elif f in ma: # clean, a different, no remote
204 204 if n != ma[f]:
205 205 if repo.ui.promptchoice(
206 206 _(" local changed %s which remote deleted\n"
207 207 "use (c)hanged version or (d)elete?") % f,
208 208 (_("&Changed"), _("&Delete")), 0):
209 209 act("prompt delete", "r", f)
210 210 else:
211 211 act("prompt keep", "a", f)
212 212 elif n[20:] == "a": # added, no remote
213 213 act("remote deleted", "f", f)
214 214 elif n[20:] != "u":
215 215 act("other deleted", "r", f)
216 216
217 217 for f, n in m2.iteritems():
218 218 if partial and not partial(f):
219 219 continue
220 220 if f in m1 or f in copied: # files already visited
221 221 continue
222 222 if f in copy:
223 223 f2 = copy[f]
224 224 if f2 not in m1: # directory rename
225 225 act("local renamed directory to " + f2, "d",
226 226 None, f, f2, m2.flags(f))
227 227 elif f2 in m2: # rename case 1, A/A,B/A
228 228 act("remote copied to " + f, "m",
229 229 f2, f, f, fmerge(f2, f, f2), False)
230 230 else: # case 3,20 A/B/A
231 231 act("remote moved to " + f, "m",
232 232 f2, f, f, fmerge(f2, f, f2), True)
233 233 elif f not in ma:
234 234 act("remote created", "g", f, m2.flags(f))
235 235 elif n != ma[f]:
236 236 if repo.ui.promptchoice(
237 237 _("remote changed %s which local deleted\n"
238 238 "use (c)hanged version or leave (d)eleted?") % f,
239 239 (_("&Changed"), _("&Deleted")), 0) == 0:
240 240 act("prompt recreating", "g", f, m2.flags(f))
241 241
242 242 return action
243 243
244 244 def actionkey(a):
245 245 return a[1] == 'r' and -1 or 0, a
246 246
247 247 def applyupdates(repo, action, wctx, mctx):
248 248 "apply the merge action list to the working directory"
249 249
250 250 updated, merged, removed, unresolved = 0, 0, 0, 0
251 251 ms = mergestate(repo)
252 252 ms.reset(wctx.parents()[0].node())
253 253 moves = []
254 254 action.sort(key=actionkey)
255 255 substate = wctx.substate # prime
256 256
257 257 # prescan for merges
258 258 u = repo.ui
259 259 for a in action:
260 260 f, m = a[:2]
261 261 if m == 'm': # merge
262 262 f2, fd, flags, move = a[2:]
263 263 if f == '.hgsubstate': # merged internally
264 264 continue
265 265 repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
266 266 fcl = wctx[f]
267 267 fco = mctx[f2]
268 268 fca = fcl.ancestor(fco) or repo.filectx(f, fileid=nullrev)
269 269 ms.add(fcl, fco, fca, fd, flags)
270 270 if f != fd and move:
271 271 moves.append(f)
272 272
273 273 # remove renamed files after safely stored
274 274 for f in moves:
275 275 if util.lexists(repo.wjoin(f)):
276 276 repo.ui.debug("removing %s\n" % f)
277 277 os.unlink(repo.wjoin(f))
278 278
279 279 audit_path = util.path_auditor(repo.root)
280 280
281 281 numupdates = len(action)
282 282 for i, a in enumerate(action):
283 283 f, m = a[:2]
284 284 u.progress('update', i + 1, item=f, total=numupdates, unit='files')
285 285 if f and f[0] == "/":
286 286 continue
287 287 if m == "r": # remove
288 288 repo.ui.note(_("removing %s\n") % f)
289 289 audit_path(f)
290 290 if f == '.hgsubstate': # subrepo states need updating
291 291 subrepo.submerge(repo, wctx, mctx, wctx)
292 292 try:
293 293 util.unlink(repo.wjoin(f))
294 294 except OSError, inst:
295 295 if inst.errno != errno.ENOENT:
296 296 repo.ui.warn(_("update failed to remove %s: %s!\n") %
297 297 (f, inst.strerror))
298 298 removed += 1
299 299 elif m == "m": # merge
300 300 if f == '.hgsubstate': # subrepo states need updating
301 301 subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx))
302 302 continue
303 303 f2, fd, flags, move = a[2:]
304 304 r = ms.resolve(fd, wctx, mctx)
305 305 if r is not None and r > 0:
306 306 unresolved += 1
307 307 else:
308 308 if r is None:
309 309 updated += 1
310 310 else:
311 311 merged += 1
312 312 util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
313 313 if f != fd and move and util.lexists(repo.wjoin(f)):
314 314 repo.ui.debug("removing %s\n" % f)
315 315 os.unlink(repo.wjoin(f))
316 316 elif m == "g": # get
317 317 flags = a[2]
318 318 repo.ui.note(_("getting %s\n") % f)
319 319 t = mctx.filectx(f).data()
320 320 repo.wwrite(f, t, flags)
321 321 updated += 1
322 322 if f == '.hgsubstate': # subrepo states need updating
323 323 subrepo.submerge(repo, wctx, mctx, wctx)
324 324 elif m == "d": # directory rename
325 325 f2, fd, flags = a[2:]
326 326 if f:
327 327 repo.ui.note(_("moving %s to %s\n") % (f, fd))
328 328 t = wctx.filectx(f).data()
329 329 repo.wwrite(fd, t, flags)
330 330 util.unlink(repo.wjoin(f))
331 331 if f2:
332 332 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
333 333 t = mctx.filectx(f2).data()
334 334 repo.wwrite(fd, t, flags)
335 335 updated += 1
336 336 elif m == "dr": # divergent renames
337 337 fl = a[2]
338 338 repo.ui.warn(_("warning: detected divergent renames of %s to:\n") % f)
339 339 for nf in fl:
340 340 repo.ui.warn(" %s\n" % nf)
341 341 elif m == "e": # exec
342 342 flags = a[2]
343 343 util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags)
344 344 u.progress('update', None, total=numupdates, unit='files')
345 345
346 346 return updated, merged, removed, unresolved
347 347
348 348 def recordupdates(repo, action, branchmerge):
349 349 "record merge actions to the dirstate"
350 350
351 351 for a in action:
352 352 f, m = a[:2]
353 353 if m == "r": # remove
354 354 if branchmerge:
355 355 repo.dirstate.remove(f)
356 356 else:
357 357 repo.dirstate.forget(f)
358 358 elif m == "a": # re-add
359 359 if not branchmerge:
360 360 repo.dirstate.add(f)
361 361 elif m == "f": # forget
362 362 repo.dirstate.forget(f)
363 363 elif m == "e": # exec change
364 364 repo.dirstate.normallookup(f)
365 365 elif m == "g": # get
366 366 if branchmerge:
367 367 repo.dirstate.normaldirty(f)
368 368 else:
369 369 repo.dirstate.normal(f)
370 370 elif m == "m": # merge
371 371 f2, fd, flag, move = a[2:]
372 372 if branchmerge:
373 373 # We've done a branch merge, mark this file as merged
374 374 # so that we properly record the merger later
375 375 repo.dirstate.merge(fd)
376 376 if f != f2: # copy/rename
377 377 if move:
378 378 repo.dirstate.remove(f)
379 379 if f != fd:
380 380 repo.dirstate.copy(f, fd)
381 381 else:
382 382 repo.dirstate.copy(f2, fd)
383 383 else:
384 384 # We've update-merged a locally modified file, so
385 385 # we set the dirstate to emulate a normal checkout
386 386 # of that file some time in the past. Thus our
387 387 # merge will appear as a normal local file
388 388 # modification.
389 389 repo.dirstate.normallookup(fd)
390 390 if move:
391 391 repo.dirstate.forget(f)
392 392 elif m == "d": # directory rename
393 393 f2, fd, flag = a[2:]
394 394 if not f2 and f not in repo.dirstate:
395 395 # untracked file moved
396 396 continue
397 397 if branchmerge:
398 398 repo.dirstate.add(fd)
399 399 if f:
400 400 repo.dirstate.remove(f)
401 401 repo.dirstate.copy(f, fd)
402 402 if f2:
403 403 repo.dirstate.copy(f2, fd)
404 404 else:
405 405 repo.dirstate.normal(fd)
406 406 if f:
407 407 repo.dirstate.forget(f)
408 408
409 409 def update(repo, node, branchmerge, force, partial):
410 410 """
411 411 Perform a merge between the working directory and the given node
412 412
413 413 node = the node to update to, or None if unspecified
414 414 branchmerge = whether to merge between branches
415 415 force = whether to force branch merging or file overwriting
416 416 partial = a function to filter file lists (dirstate not updated)
417 417
418 418 The table below shows all the behaviors of the update command
419 419 given the -c and -C or no options, whether the working directory
420 420 is dirty, whether a revision is specified, and the relationship of
421 421 the parent rev to the target rev (linear, on the same named
422 422 branch, or on another named branch).
423 423
424 424 This logic is tested by test-update-branches.
425 425
426 426 -c -C dirty rev | linear same cross
427 427 n n n n | ok (1) x
428 428 n n n y | ok ok ok
429 429 n n y * | merge (2) (2)
430 430 n y * * | --- discard ---
431 431 y n y * | --- (3) ---
432 432 y n n * | --- ok ---
433 433 y y * * | --- (4) ---
434 434
435 435 x = can't happen
436 436 * = don't-care
437 437 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
438 438 2 = abort: crosses branches (use 'hg merge' to merge or
439 439 use 'hg update -C' to discard changes)
440 440 3 = abort: uncommitted local changes
441 441 4 = incompatible options (checked in commands.py)
442 442 """
443 443
444 444 onode = node
445 445 wlock = repo.wlock()
446 446 try:
447 447 wc = repo[None]
448 448 if node is None:
449 449 # tip of current branch
450 450 try:
451 451 node = repo.branchtags()[wc.branch()]
452 452 except KeyError:
453 453 if wc.branch() == "default": # no default branch!
454 454 node = repo.lookup("tip") # update to tip
455 455 else:
456 456 raise util.Abort(_("branch %s not found") % wc.branch())
457 457 overwrite = force and not branchmerge
458 458 pl = wc.parents()
459 459 p1, p2 = pl[0], repo[node]
460 460 pa = p1.ancestor(p2)
461 461 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
462 462 fastforward = False
463 463
464 464 ### check phase
465 465 if not overwrite and len(pl) > 1:
466 466 raise util.Abort(_("outstanding uncommitted merges"))
467 467 if branchmerge:
468 468 if pa == p2:
469 469 raise util.Abort(_("can't merge with ancestor"))
470 470 elif pa == p1:
471 471 if p1.branch() != p2.branch():
472 472 fastforward = True
473 473 else:
474 474 raise util.Abort(_("nothing to merge (use 'hg update'"
475 475 " or check 'hg heads')"))
476 476 if not force and (wc.files() or wc.deleted()):
477 477 raise util.Abort(_("outstanding uncommitted changes "
478 478 "(use 'hg status' to list changes)"))
479 479 elif not overwrite:
480 480 if pa == p1 or pa == p2: # linear
481 481 pass # all good
482 482 elif wc.files() or wc.deleted():
483 483 raise util.Abort(_("crosses branches (use 'hg merge' to merge "
484 484 "or use 'hg update -C' to discard changes)"))
485 485 elif onode is None:
486 486 raise util.Abort(_("crosses branches (use 'hg merge' or use "
487 487 "'hg update -c')"))
488 488 else:
489 489 # Allow jumping branches if clean and specific rev given
490 490 overwrite = True
491 491
492 492 ### calculate phase
493 493 action = []
494 494 if not force:
495 495 _checkunknown(wc, p2)
496 496 if not util.checkcase(repo.path):
497 497 _checkcollision(p2)
498 498 action += _forgetremoved(wc, p2, branchmerge)
499 499 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
500 500
501 501 ### apply phase
502 502 if not branchmerge: # just jump to the new rev
503 503 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
504 504 if not partial:
505 505 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
506 506
507 507 stats = applyupdates(repo, action, wc, p2)
508 508
509 509 if not partial:
510 510 recordupdates(repo, action, branchmerge)
511 511 repo.dirstate.setparents(fp1, fp2)
512 512 if not branchmerge and not fastforward:
513 513 repo.dirstate.setbranch(p2.branch())
514 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
515
516 return stats
517 514 finally:
518 515 wlock.release()
516
517 if not partial:
518 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
519 return stats
@@ -1,265 +1,273 b''
1 1 #!/bin/sh
2 2
3 3 cp "$TESTDIR"/printenv.py .
4 4
5 5 # commit hooks can see env vars
6 6 hg init a
7 7 cd a
8 8 echo "[hooks]" > .hg/hgrc
9 9 echo 'commit = unset HG_LOCAL HG_TAG; python ../printenv.py commit' >> .hg/hgrc
10 10 echo 'commit.b = unset HG_LOCAL HG_TAG; python ../printenv.py commit.b' >> .hg/hgrc
11 11 echo 'precommit = unset HG_LOCAL HG_NODE HG_TAG; python ../printenv.py precommit' >> .hg/hgrc
12 12 echo 'pretxncommit = unset HG_LOCAL HG_TAG; python ../printenv.py pretxncommit' >> .hg/hgrc
13 13 echo 'pretxncommit.tip = hg -q tip' >> .hg/hgrc
14 14 echo 'pre-identify = python ../printenv.py pre-identify 1' >> .hg/hgrc
15 15 echo 'pre-cat = python ../printenv.py pre-cat' >> .hg/hgrc
16 16 echo 'post-cat = python ../printenv.py post-cat' >> .hg/hgrc
17 17 echo a > a
18 18 hg add a
19 19 hg commit -m a -d "1000000 0"
20 20
21 21 hg clone . ../b
22 22 cd ../b
23 23
24 24 # changegroup hooks can see env vars
25 25 echo '[hooks]' > .hg/hgrc
26 26 echo 'prechangegroup = python ../printenv.py prechangegroup' >> .hg/hgrc
27 27 echo 'changegroup = python ../printenv.py changegroup' >> .hg/hgrc
28 28 echo 'incoming = python ../printenv.py incoming' >> .hg/hgrc
29 29
30 30 # pretxncommit and commit hooks can see both parents of merge
31 31 cd ../a
32 32 echo b >> a
33 33 hg commit -m a1 -d "1 0"
34 34 hg update -C 0
35 35 echo b > b
36 36 hg add b
37 37 hg commit -m b -d '1 0'
38 38 hg merge 1
39 39 hg commit -m merge -d '2 0'
40 40
41 41 # test generic hooks
42 42 hg id
43 43 hg cat b
44 44
45 45 cd ../b
46 46 hg pull ../a
47 47
48 48 # tag hooks can see env vars
49 49 cd ../a
50 50 echo 'pretag = python ../printenv.py pretag' >> .hg/hgrc
51 51 echo 'tag = unset HG_PARENT1 HG_PARENT2; python ../printenv.py tag' >> .hg/hgrc
52 52 hg tag -d '3 0' a
53 53 hg tag -l la
54 54
55 55 # pretag hook can forbid tagging
56 56 echo 'pretag.forbid = python ../printenv.py pretag.forbid 1' >> .hg/hgrc
57 57 hg tag -d '4 0' fa
58 58 hg tag -l fla
59 59
60 60 # pretxncommit hook can see changeset, can roll back txn, changeset
61 61 # no more there after
62 62 echo 'pretxncommit.forbid0 = hg tip -q' >> .hg/hgrc
63 63 echo 'pretxncommit.forbid1 = python ../printenv.py pretxncommit.forbid 1' >> .hg/hgrc
64 64 echo z > z
65 65 hg add z
66 66 hg -q tip
67 67 hg commit -m 'fail' -d '4 0'
68 68 hg -q tip
69 69
70 70 # precommit hook can prevent commit
71 71 echo 'precommit.forbid = python ../printenv.py precommit.forbid 1' >> .hg/hgrc
72 72 hg commit -m 'fail' -d '4 0'
73 73 hg -q tip
74 74
75 75 # preupdate hook can prevent update
76 76 echo 'preupdate = python ../printenv.py preupdate' >> .hg/hgrc
77 77 hg update 1
78 78
79 79 # update hook
80 80 echo 'update = python ../printenv.py update' >> .hg/hgrc
81 81 hg update
82 82
83 83 # prechangegroup hook can prevent incoming changes
84 84 cd ../b
85 85 hg -q tip
86 86 echo '[hooks]' > .hg/hgrc
87 87 echo 'prechangegroup.forbid = python ../printenv.py prechangegroup.forbid 1' >> .hg/hgrc
88 88 hg pull ../a
89 89
90 90 # pretxnchangegroup hook can see incoming changes, can roll back txn,
91 91 # incoming changes no longer there after
92 92 echo '[hooks]' > .hg/hgrc
93 93 echo 'pretxnchangegroup.forbid0 = hg tip -q' >> .hg/hgrc
94 94 echo 'pretxnchangegroup.forbid1 = python ../printenv.py pretxnchangegroup.forbid 1' >> .hg/hgrc
95 95 hg pull ../a
96 96 hg -q tip
97 97
98 98 # outgoing hooks can see env vars
99 99 rm .hg/hgrc
100 100 echo '[hooks]' > ../a/.hg/hgrc
101 101 echo 'preoutgoing = python ../printenv.py preoutgoing' >> ../a/.hg/hgrc
102 102 echo 'outgoing = python ../printenv.py outgoing' >> ../a/.hg/hgrc
103 103 hg pull ../a
104 104 hg rollback
105 105
106 106 # preoutgoing hook can prevent outgoing changes
107 107 echo 'preoutgoing.forbid = python ../printenv.py preoutgoing.forbid 1' >> ../a/.hg/hgrc
108 108 hg pull ../a
109 109
110 110 # outgoing hooks work for local clones
111 111 cd ..
112 112 echo '[hooks]' > a/.hg/hgrc
113 113 echo 'preoutgoing = python ../printenv.py preoutgoing' >> a/.hg/hgrc
114 114 echo 'outgoing = python ../printenv.py outgoing' >> a/.hg/hgrc
115 115 hg clone a c
116 116 rm -rf c
117 117
118 118 # preoutgoing hook can prevent outgoing changes for local clones
119 119 echo 'preoutgoing.forbid = python ../printenv.py preoutgoing.forbid 1' >> a/.hg/hgrc
120 120 hg clone a zzz
121 121 cd b
122 122
123 123 cat > hooktests.py <<EOF
124 124 from mercurial import util
125 125
126 126 uncallable = 0
127 127
128 128 def printargs(args):
129 129 args.pop('ui', None)
130 130 args.pop('repo', None)
131 131 a = list(args.items())
132 132 a.sort()
133 133 print 'hook args:'
134 134 for k, v in a:
135 135 print ' ', k, v
136 136
137 137 def passhook(**args):
138 138 printargs(args)
139 139
140 140 def failhook(**args):
141 141 printargs(args)
142 142 return True
143 143
144 144 class LocalException(Exception):
145 145 pass
146 146
147 147 def raisehook(**args):
148 148 raise LocalException('exception from hook')
149 149
150 150 def aborthook(**args):
151 151 raise util.Abort('raise abort from hook')
152 152
153 153 def brokenhook(**args):
154 154 return 1 + {}
155 155
156 156 class container:
157 157 unreachable = 1
158 158 EOF
159 159
160 160 echo '# test python hooks'
161 161 PYTHONPATH="`pwd`:$PYTHONPATH"
162 162 export PYTHONPATH
163 163
164 164 echo '[hooks]' > ../a/.hg/hgrc
165 165 echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
166 166 hg pull ../a 2>&1 | grep 'raised an exception'
167 167
168 168 echo '[hooks]' > ../a/.hg/hgrc
169 169 echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
170 170 hg pull ../a 2>&1 | grep 'raised an exception'
171 171
172 172 echo '[hooks]' > ../a/.hg/hgrc
173 173 echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
174 174 hg pull ../a
175 175
176 176 echo '[hooks]' > ../a/.hg/hgrc
177 177 echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
178 178 hg pull ../a
179 179
180 180 echo '[hooks]' > ../a/.hg/hgrc
181 181 echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
182 182 hg pull ../a
183 183
184 184 echo '[hooks]' > ../a/.hg/hgrc
185 185 echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
186 186 hg pull ../a
187 187
188 188 echo '[hooks]' > ../a/.hg/hgrc
189 189 echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
190 190 hg pull ../a
191 191
192 192 echo '[hooks]' > ../a/.hg/hgrc
193 193 echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
194 194 hg pull ../a
195 195
196 196 echo '[hooks]' > ../a/.hg/hgrc
197 197 echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
198 198 hg pull ../a
199 199
200 200 echo '[hooks]' > ../a/.hg/hgrc
201 201 echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
202 202 hg pull ../a
203 203
204 204 echo '# make sure --traceback works'
205 205 echo '[hooks]' > .hg/hgrc
206 206 echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
207 207
208 echo a >> a
209 hg --traceback commit -A -m a 2>&1 | grep '^Traceback'
208 echo aa > a
209 hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
210 210
211 211 cd ..
212 212 hg init c
213 213 cd c
214 214
215 215 cat > hookext.py <<EOF
216 216 def autohook(**args):
217 217 print "Automatically installed hook"
218 218
219 219 def reposetup(ui, repo):
220 220 repo.ui.setconfig("hooks", "commit.auto", autohook)
221 221 EOF
222 222 echo '[extensions]' >> .hg/hgrc
223 223 echo 'hookext = hookext.py' >> .hg/hgrc
224 224
225 225 touch foo
226 226 hg add foo
227 hg ci -m 'add foo'
227 hg ci -d '0 0' -m 'add foo'
228 228 echo >> foo
229 hg ci --debug -m 'change foo' | sed -e 's/ at .*>/>/'
229 hg ci --debug -d '0 0' -m 'change foo' | sed -e 's/ at .*>/>/'
230 230
231 231 hg showconfig hooks | sed -e 's/ at .*>/>/'
232 232
233 233 echo '# test python hook configured with python:[file]:[hook] syntax'
234 234 cd ..
235 235 mkdir d
236 236 cd d
237 237 hg init repo
238 238 mkdir hooks
239 239
240 240 cd hooks
241 241 cat > testhooks.py <<EOF
242 242 def testhook(**args):
243 243 print 'hook works'
244 244 EOF
245 245 echo '[hooks]' > ../repo/.hg/hgrc
246 246 echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
247 247
248 248 cd ../repo
249 hg commit
249 hg commit -d '0 0'
250 250
251 251 cd ../../b
252 252 echo '# make sure --traceback works on hook import failure'
253 253 cat > importfail.py <<EOF
254 254 import somebogusmodule
255 255 # dereference something in the module to force demandimport to load it
256 256 somebogusmodule.whatever
257 257 EOF
258 258
259 259 echo '[hooks]' > .hg/hgrc
260 260 echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
261 261
262 262 echo a >> a
263 hg --traceback commit -Ama 2>&1 | egrep '^(exception|Traceback|ImportError)'
263 hg --traceback commit -d '0 0' -ma 2>&1 | egrep '^(exception|Traceback|ImportError)'
264
265 echo '# commit and update hooks should run after command completion (issue 1827)'
266 echo '[hooks]' > .hg/hgrc
267 echo 'commit = hg id' >> .hg/hgrc
268 echo 'update = hg id' >> .hg/hgrc
269 echo bb > a
270 hg ci -d '0 0' -ma
271 hg up 0
264 272
265 273 exit 0
@@ -1,173 +1,177 b''
1 1 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
2 2 pretxncommit hook: HG_NODE=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$HGTMP/test-hook/a
3 3 0:29b62aeb769f
4 4 commit hook: HG_NODE=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PARENT1=0000000000000000000000000000000000000000
5 5 commit.b hook: HG_NODE=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PARENT1=0000000000000000000000000000000000000000
6 6 updating to branch default
7 7 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
8 8 precommit hook: HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
9 9 pretxncommit hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PENDING=$HGTMP/test-hook/a
10 10 1:b702efe96888
11 11 commit hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
12 12 commit.b hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
13 13 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
14 14 precommit hook: HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
15 15 pretxncommit hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b HG_PENDING=$HGTMP/test-hook/a
16 16 2:1324a5531bac
17 17 commit hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
18 18 commit.b hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b
19 19 created new head
20 20 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
21 21 (branch merge, don't forget to commit)
22 22 precommit hook: HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
23 23 pretxncommit hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2 HG_PENDING=$HGTMP/test-hook/a
24 24 3:4c52fb2e4022
25 25 commit hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
26 26 commit.b hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_PARENT1=1324a5531bac09b329c3845d35ae6a7526874edb HG_PARENT2=b702efe9688826e3a91283852b328b84dbf37bc2
27 27 pre-identify hook: HG_ARGS=id
28 28 warning: pre-identify hook exited with status 1
29 29 pre-cat hook: HG_ARGS=cat b
30 30 post-cat hook: HG_ARGS=cat b HG_RESULT=0
31 31 b
32 32 prechangegroup hook: HG_SOURCE=pull HG_URL=file:
33 33 changegroup hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_SOURCE=pull HG_URL=file:
34 34 incoming hook: HG_NODE=b702efe9688826e3a91283852b328b84dbf37bc2 HG_SOURCE=pull HG_URL=file:
35 35 incoming hook: HG_NODE=1324a5531bac09b329c3845d35ae6a7526874edb HG_SOURCE=pull HG_URL=file:
36 36 incoming hook: HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_SOURCE=pull HG_URL=file:
37 37 pulling from ../a
38 38 searching for changes
39 39 adding changesets
40 40 adding manifests
41 41 adding file changes
42 42 added 3 changesets with 2 changes to 2 files
43 43 (run 'hg update' to get a working copy)
44 44 pretag hook: HG_LOCAL=0 HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_TAG=a
45 45 precommit hook: HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
46 46 pretxncommit hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321 HG_PENDING=$HGTMP/test-hook/a
47 47 4:8ea2ef7ad3e8
48 48 commit hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
49 49 commit.b hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PARENT1=4c52fb2e402287dd5dc052090682536c8406c321
50 50 tag hook: HG_LOCAL=0 HG_NODE=4c52fb2e402287dd5dc052090682536c8406c321 HG_TAG=a
51 51 pretag hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=la
52 52 tag hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=la
53 53 pretag hook: HG_LOCAL=0 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fa
54 54 pretag.forbid hook: HG_LOCAL=0 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fa
55 55 abort: pretag.forbid hook exited with status 1
56 56 pretag hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fla
57 57 pretag.forbid hook: HG_LOCAL=1 HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_TAG=fla
58 58 abort: pretag.forbid hook exited with status 1
59 59 4:8ea2ef7ad3e8
60 60 precommit hook: HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
61 61 pretxncommit hook: HG_NODE=fad284daf8c032148abaffcd745dafeceefceb61 HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PENDING=$HGTMP/test-hook/a
62 62 5:fad284daf8c0
63 63 5:fad284daf8c0
64 64 pretxncommit.forbid hook: HG_NODE=fad284daf8c032148abaffcd745dafeceefceb61 HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PENDING=$HGTMP/test-hook/a
65 65 transaction abort!
66 66 rollback completed
67 67 abort: pretxncommit.forbid1 hook exited with status 1
68 68 4:8ea2ef7ad3e8
69 69 precommit hook: HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
70 70 precommit.forbid hook: HG_PARENT1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198
71 71 abort: precommit.forbid hook exited with status 1
72 72 4:8ea2ef7ad3e8
73 73 preupdate hook: HG_PARENT1=b702efe96888
74 74 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
75 75 preupdate hook: HG_PARENT1=8ea2ef7ad3e8
76 76 update hook: HG_ERROR=0 HG_PARENT1=8ea2ef7ad3e8
77 77 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
78 78 3:4c52fb2e4022
79 79 prechangegroup.forbid hook: HG_SOURCE=pull HG_URL=file:
80 80 pulling from ../a
81 81 searching for changes
82 82 abort: prechangegroup.forbid hook exited with status 1
83 83 4:8ea2ef7ad3e8
84 84 pretxnchangegroup.forbid hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_PENDING=$HGTMP/test-hook/b HG_SOURCE=pull HG_URL=file:
85 85 pulling from ../a
86 86 searching for changes
87 87 adding changesets
88 88 adding manifests
89 89 adding file changes
90 90 added 1 changesets with 1 changes to 1 files
91 91 transaction abort!
92 92 rollback completed
93 93 abort: pretxnchangegroup.forbid1 hook exited with status 1
94 94 3:4c52fb2e4022
95 95 preoutgoing hook: HG_SOURCE=pull
96 96 outgoing hook: HG_NODE=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 HG_SOURCE=pull
97 97 pulling from ../a
98 98 searching for changes
99 99 adding changesets
100 100 adding manifests
101 101 adding file changes
102 102 added 1 changesets with 1 changes to 1 files
103 103 (run 'hg update' to get a working copy)
104 104 rolling back last transaction
105 105 preoutgoing hook: HG_SOURCE=pull
106 106 preoutgoing.forbid hook: HG_SOURCE=pull
107 107 pulling from ../a
108 108 searching for changes
109 109 abort: preoutgoing.forbid hook exited with status 1
110 110 preoutgoing hook: HG_SOURCE=clone
111 111 outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
112 112 updating to branch default
113 113 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
114 114 preoutgoing hook: HG_SOURCE=clone
115 115 preoutgoing.forbid hook: HG_SOURCE=clone
116 116 abort: preoutgoing.forbid hook exited with status 1
117 117 # test python hooks
118 118 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
119 119 error: preoutgoing.raise hook raised an exception: exception from hook
120 120 pulling from ../a
121 121 searching for changes
122 122 error: preoutgoing.abort hook failed: raise abort from hook
123 123 abort: raise abort from hook
124 124 pulling from ../a
125 125 searching for changes
126 126 hook args:
127 127 hooktype preoutgoing
128 128 source pull
129 129 abort: preoutgoing.fail hook failed
130 130 pulling from ../a
131 131 searching for changes
132 132 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
133 133 pulling from ../a
134 134 searching for changes
135 135 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
136 136 pulling from ../a
137 137 searching for changes
138 138 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
139 139 pulling from ../a
140 140 searching for changes
141 141 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
142 142 pulling from ../a
143 143 searching for changes
144 144 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
145 145 pulling from ../a
146 146 searching for changes
147 147 hook args:
148 148 hooktype preoutgoing
149 149 source pull
150 150 adding changesets
151 151 adding manifests
152 152 adding file changes
153 153 added 1 changesets with 1 changes to 1 files
154 154 (run 'hg update' to get a working copy)
155 155 # make sure --traceback works
156 156 Traceback (most recent call last):
157 157 Automatically installed hook
158 158 foo
159 159 calling hook commit.auto: <function autohook>
160 160 Automatically installed hook
161 161 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
162 162 hooks.commit.auto=<function autohook>
163 163 # test python hook configured with python:[file]:[hook] syntax
164 164 hook works
165 165 nothing changed
166 166 # make sure --traceback works on hook import failure
167 167 exception from first failed import attempt:
168 168 Traceback (most recent call last):
169 169 ImportError: No module named somebogusmodule
170 170 exception from second failed import attempt:
171 171 Traceback (most recent call last):
172 172 ImportError: No module named hgext_importfail
173 173 Traceback (most recent call last):
174 # commit and update hooks should run after command completion (issue 1827)
175 8da618c33484 tip
176 29b62aeb769f
177 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
General Comments 0
You need to be logged in to leave comments. Login now