##// END OF EJS Templates
Handle functions as the value of a hooks.<name> config variable...
Alexis S. L. Carvalho -
r4070:961ccb61 default
parent child Browse files
Show More
@@ -1,1903 +1,1907 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, appendfile, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
class localrepository(repo.repository):
    # Protocol capabilities this repository advertises to clients.
    capabilities = ('lookup', 'changegroupsubset')
    # On-disk format requirements this code knows how to read.
    supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 if not path:
24 24 p = os.getcwd()
25 25 while not os.path.isdir(os.path.join(p, ".hg")):
26 26 oldp = p
27 27 p = os.path.dirname(p)
28 28 if p == oldp:
29 29 raise repo.RepoError(_("There is no Mercurial repository"
30 30 " here (.hg not found)"))
31 31 path = p
32 32
33 33 self.path = os.path.join(path, ".hg")
34 34 self.root = os.path.realpath(path)
35 35 self.origroot = path
36 36 self.opener = util.opener(self.path)
37 37 self.wopener = util.opener(self.root)
38 38
39 39 if not os.path.isdir(self.path):
40 40 if create:
41 41 if not os.path.exists(path):
42 42 os.mkdir(path)
43 43 os.mkdir(self.path)
44 44 os.mkdir(os.path.join(self.path, "store"))
45 45 requirements = ("revlogv1", "store")
46 46 reqfile = self.opener("requires", "w")
47 47 for r in requirements:
48 48 reqfile.write("%s\n" % r)
49 49 reqfile.close()
50 50 # create an invalid changelog
51 51 self.opener("00changelog.i", "a").write(
52 52 '\0\0\0\2' # represents revlogv2
53 53 ' dummy changelog to prevent using the old repo layout'
54 54 )
55 55 else:
56 56 raise repo.RepoError(_("repository %s not found") % path)
57 57 elif create:
58 58 raise repo.RepoError(_("repository %s already exists") % path)
59 59 else:
60 60 # find requirements
61 61 try:
62 62 requirements = self.opener("requires").read().splitlines()
63 63 except IOError, inst:
64 64 if inst.errno != errno.ENOENT:
65 65 raise
66 66 requirements = []
67 67 # check them
68 68 for r in requirements:
69 69 if r not in self.supported:
70 70 raise repo.RepoError(_("requirement '%s' not supported") % r)
71 71
72 72 # setup store
73 73 if "store" in requirements:
74 74 self.encodefn = util.encodefilename
75 75 self.decodefn = util.decodefilename
76 76 self.spath = os.path.join(self.path, "store")
77 77 else:
78 78 self.encodefn = lambda x: x
79 79 self.decodefn = lambda x: x
80 80 self.spath = self.path
81 81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
82 82
83 83 self.ui = ui.ui(parentui=parentui)
84 84 try:
85 85 self.ui.readconfig(self.join("hgrc"), self.root)
86 86 except IOError:
87 87 pass
88 88
89 89 v = self.ui.configrevlog()
90 90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
91 91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
92 92 fl = v.get('flags', None)
93 93 flags = 0
94 94 if fl != None:
95 95 for x in fl.split():
96 96 flags |= revlog.flagstr(x)
97 97 elif self.revlogv1:
98 98 flags = revlog.REVLOG_DEFAULT_FLAGS
99 99
100 100 v = self.revlogversion | flags
101 101 self.manifest = manifest.manifest(self.sopener, v)
102 102 self.changelog = changelog.changelog(self.sopener, v)
103 103
104 104 fallback = self.ui.config('ui', 'fallbackencoding')
105 105 if fallback:
106 106 util._fallbackencoding = fallback
107 107
108 108 # the changelog might not have the inline index flag
109 109 # on. If the format of the changelog is the same as found in
110 110 # .hgrc, apply any flags found in the .hgrc as well.
111 111 # Otherwise, just version from the changelog
112 112 v = self.changelog.version
113 113 if v == self.revlogversion:
114 114 v |= flags
115 115 self.revlogversion = v
116 116
117 117 self.tagscache = None
118 118 self.branchcache = None
119 119 self.nodetagscache = None
120 120 self.filterpats = {}
121 121 self.transhandle = None
122 122
123 123 self._link = lambda x: False
124 124 if util.checklink(self.root):
125 125 r = self.root # avoid circular reference in lambda
126 126 self._link = lambda x: util.is_link(os.path.join(r, x))
127 127
128 128 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129 129
130 130 def url(self):
131 131 return 'file:' + self.root
132 132
133 133 def hook(self, name, throw=False, **args):
134 134 def callhook(hname, funcname):
135 135 '''call python hook. hook is callable object, looked up as
136 136 name in python module. if callable returns "true", hook
137 137 fails, else passes. if hook raises exception, treated as
138 138 hook failure. exception propagates if throw is "true".
139 139
140 140 reason for "true" meaning "hook failed" is so that
141 141 unmodified commands (e.g. mercurial.commands.update) can
142 142 be run as hooks without wrappers to convert return values.'''
143 143
144 144 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
145 d = funcname.rfind('.')
146 if d == -1:
147 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
148 % (hname, funcname))
149 modname = funcname[:d]
150 try:
151 obj = __import__(modname)
152 except ImportError:
145 obj = funcname
146 if not callable(obj):
147 d = funcname.rfind('.')
148 if d == -1:
149 raise util.Abort(_('%s hook is invalid ("%s" not in '
150 'a module)') % (hname, funcname))
151 modname = funcname[:d]
153 152 try:
154 # extensions are loaded with hgext_ prefix
155 obj = __import__("hgext_%s" % modname)
153 obj = __import__(modname)
156 154 except ImportError:
155 try:
156 # extensions are loaded with hgext_ prefix
157 obj = __import__("hgext_%s" % modname)
158 except ImportError:
159 raise util.Abort(_('%s hook is invalid '
160 '(import of "%s" failed)') %
161 (hname, modname))
162 try:
163 for p in funcname.split('.')[1:]:
164 obj = getattr(obj, p)
165 except AttributeError, err:
157 166 raise util.Abort(_('%s hook is invalid '
158 '(import of "%s" failed)') %
159 (hname, modname))
160 try:
161 for p in funcname.split('.')[1:]:
162 obj = getattr(obj, p)
163 except AttributeError, err:
164 raise util.Abort(_('%s hook is invalid '
165 '("%s" is not defined)') %
166 (hname, funcname))
167 if not callable(obj):
168 raise util.Abort(_('%s hook is invalid '
169 '("%s" is not callable)') %
170 (hname, funcname))
167 '("%s" is not defined)') %
168 (hname, funcname))
169 if not callable(obj):
170 raise util.Abort(_('%s hook is invalid '
171 '("%s" is not callable)') %
172 (hname, funcname))
171 173 try:
172 174 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
173 175 except (KeyboardInterrupt, util.SignalInterrupt):
174 176 raise
175 177 except Exception, exc:
176 178 if isinstance(exc, util.Abort):
177 179 self.ui.warn(_('error: %s hook failed: %s\n') %
178 180 (hname, exc.args[0]))
179 181 else:
180 182 self.ui.warn(_('error: %s hook raised an exception: '
181 183 '%s\n') % (hname, exc))
182 184 if throw:
183 185 raise
184 186 self.ui.print_exc()
185 187 return True
186 188 if r:
187 189 if throw:
188 190 raise util.Abort(_('%s hook failed') % hname)
189 191 self.ui.warn(_('warning: %s hook failed\n') % hname)
190 192 return r
191 193
192 194 def runhook(name, cmd):
193 195 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
194 196 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
195 197 r = util.system(cmd, environ=env, cwd=self.root)
196 198 if r:
197 199 desc, r = util.explain_exit(r)
198 200 if throw:
199 201 raise util.Abort(_('%s hook %s') % (name, desc))
200 202 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
201 203 return r
202 204
203 205 r = False
204 206 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
205 207 if hname.split(".", 1)[0] == name and cmd]
206 208 hooks.sort()
207 209 for hname, cmd in hooks:
208 if cmd.startswith('python:'):
210 if callable(cmd):
211 r = callhook(hname, cmd) or r
212 elif cmd.startswith('python:'):
209 213 r = callhook(hname, cmd[7:].strip()) or r
210 214 else:
211 215 r = runhook(hname, cmd) or r
212 216 return r
213 217
214 218 tag_disallowed = ':\r\n'
215 219
216 220 def tag(self, name, node, message, local, user, date):
217 221 '''tag a revision with a symbolic name.
218 222
219 223 if local is True, the tag is stored in a per-repository file.
220 224 otherwise, it is stored in the .hgtags file, and a new
221 225 changeset is committed with the change.
222 226
223 227 keyword arguments:
224 228
225 229 local: whether to store tag in non-version-controlled file
226 230 (default False)
227 231
228 232 message: commit message to use if committing
229 233
230 234 user: name of user to use if committing
231 235
232 236 date: date tuple to use if committing'''
233 237
234 238 for c in self.tag_disallowed:
235 239 if c in name:
236 240 raise util.Abort(_('%r cannot be used in a tag name') % c)
237 241
238 242 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
239 243
240 244 if local:
241 245 # local tags are stored in the current charset
242 246 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
243 247 self.hook('tag', node=hex(node), tag=name, local=local)
244 248 return
245 249
246 250 for x in self.status()[:5]:
247 251 if '.hgtags' in x:
248 252 raise util.Abort(_('working copy of .hgtags is changed '
249 253 '(please commit .hgtags manually)'))
250 254
251 255 # committed tags are stored in UTF-8
252 256 line = '%s %s\n' % (hex(node), util.fromlocal(name))
253 257 self.wfile('.hgtags', 'ab').write(line)
254 258 if self.dirstate.state('.hgtags') == '?':
255 259 self.add(['.hgtags'])
256 260
257 261 self.commit(['.hgtags'], message, user, date)
258 262 self.hook('tag', node=hex(node), tag=name, local=local)
259 263
260 264 def tags(self):
261 265 '''return a mapping of tag to node'''
262 266 if not self.tagscache:
263 267 self.tagscache = {}
264 268
265 269 def parsetag(line, context):
266 270 if not line:
267 271 return
268 272 s = l.split(" ", 1)
269 273 if len(s) != 2:
270 274 self.ui.warn(_("%s: cannot parse entry\n") % context)
271 275 return
272 276 node, key = s
273 277 key = util.tolocal(key.strip()) # stored in UTF-8
274 278 try:
275 279 bin_n = bin(node)
276 280 except TypeError:
277 281 self.ui.warn(_("%s: node '%s' is not well formed\n") %
278 282 (context, node))
279 283 return
280 284 if bin_n not in self.changelog.nodemap:
281 285 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
282 286 (context, key))
283 287 return
284 288 self.tagscache[key] = bin_n
285 289
286 290 # read the tags file from each head, ending with the tip,
287 291 # and add each tag found to the map, with "newer" ones
288 292 # taking precedence
289 293 f = None
290 294 for rev, node, fnode in self._hgtagsnodes():
291 295 f = (f and f.filectx(fnode) or
292 296 self.filectx('.hgtags', fileid=fnode))
293 297 count = 0
294 298 for l in f.data().splitlines():
295 299 count += 1
296 300 parsetag(l, _("%s, line %d") % (str(f), count))
297 301
298 302 try:
299 303 f = self.opener("localtags")
300 304 count = 0
301 305 for l in f:
302 306 # localtags are stored in the local character set
303 307 # while the internal tag table is stored in UTF-8
304 308 l = util.fromlocal(l)
305 309 count += 1
306 310 parsetag(l, _("localtags, line %d") % count)
307 311 except IOError:
308 312 pass
309 313
310 314 self.tagscache['tip'] = self.changelog.tip()
311 315
312 316 return self.tagscache
313 317
314 318 def _hgtagsnodes(self):
315 319 heads = self.heads()
316 320 heads.reverse()
317 321 last = {}
318 322 ret = []
319 323 for node in heads:
320 324 c = self.changectx(node)
321 325 rev = c.rev()
322 326 try:
323 327 fnode = c.filenode('.hgtags')
324 328 except revlog.LookupError:
325 329 continue
326 330 ret.append((rev, node, fnode))
327 331 if fnode in last:
328 332 ret[last[fnode]] = None
329 333 last[fnode] = len(ret) - 1
330 334 return [item for item in ret if item]
331 335
332 336 def tagslist(self):
333 337 '''return a list of tags ordered by revision'''
334 338 l = []
335 339 for t, n in self.tags().items():
336 340 try:
337 341 r = self.changelog.rev(n)
338 342 except:
339 343 r = -2 # sort to the beginning of the list if unknown
340 344 l.append((r, t, n))
341 345 l.sort()
342 346 return [(t, n) for r, t, n in l]
343 347
344 348 def nodetags(self, node):
345 349 '''return the tags associated with a node'''
346 350 if not self.nodetagscache:
347 351 self.nodetagscache = {}
348 352 for t, n in self.tags().items():
349 353 self.nodetagscache.setdefault(n, []).append(t)
350 354 return self.nodetagscache.get(node, [])
351 355
352 356 def _branchtags(self):
353 357 partial, last, lrev = self._readbranchcache()
354 358
355 359 tiprev = self.changelog.count() - 1
356 360 if lrev != tiprev:
357 361 self._updatebranchcache(partial, lrev+1, tiprev+1)
358 362 self._writebranchcache(partial, self.changelog.tip(), tiprev)
359 363
360 364 return partial
361 365
362 366 def branchtags(self):
363 367 if self.branchcache is not None:
364 368 return self.branchcache
365 369
366 370 self.branchcache = {} # avoid recursion in changectx
367 371 partial = self._branchtags()
368 372
369 373 # the branch cache is stored on disk as UTF-8, but in the local
370 374 # charset internally
371 375 for k, v in partial.items():
372 376 self.branchcache[util.tolocal(k)] = v
373 377 return self.branchcache
374 378
375 379 def _readbranchcache(self):
376 380 partial = {}
377 381 try:
378 382 f = self.opener("branches.cache")
379 383 lines = f.read().split('\n')
380 384 f.close()
381 385 last, lrev = lines.pop(0).rstrip().split(" ", 1)
382 386 last, lrev = bin(last), int(lrev)
383 387 if not (lrev < self.changelog.count() and
384 388 self.changelog.node(lrev) == last): # sanity check
385 389 # invalidate the cache
386 390 raise ValueError('Invalid branch cache: unknown tip')
387 391 for l in lines:
388 392 if not l: continue
389 393 node, label = l.rstrip().split(" ", 1)
390 394 partial[label] = bin(node)
391 395 except (KeyboardInterrupt, util.SignalInterrupt):
392 396 raise
393 397 except Exception, inst:
394 398 if self.ui.debugflag:
395 399 self.ui.warn(str(inst), '\n')
396 400 partial, last, lrev = {}, nullid, nullrev
397 401 return partial, last, lrev
398 402
399 403 def _writebranchcache(self, branches, tip, tiprev):
400 404 try:
401 405 f = self.opener("branches.cache", "w")
402 406 f.write("%s %s\n" % (hex(tip), tiprev))
403 407 for label, node in branches.iteritems():
404 408 f.write("%s %s\n" % (hex(node), label))
405 409 except IOError:
406 410 pass
407 411
408 412 def _updatebranchcache(self, partial, start, end):
409 413 for r in xrange(start, end):
410 414 c = self.changectx(r)
411 415 b = c.branch()
412 416 if b:
413 417 partial[b] = c.node()
414 418
415 419 def lookup(self, key):
416 420 if key == '.':
417 421 key = self.dirstate.parents()[0]
418 422 if key == nullid:
419 423 raise repo.RepoError(_("no revision checked out"))
420 424 elif key == 'null':
421 425 return nullid
422 426 n = self.changelog._match(key)
423 427 if n:
424 428 return n
425 429 if key in self.tags():
426 430 return self.tags()[key]
427 431 if key in self.branchtags():
428 432 return self.branchtags()[key]
429 433 n = self.changelog._partialmatch(key)
430 434 if n:
431 435 return n
432 436 raise repo.RepoError(_("unknown revision '%s'") % key)
433 437
434 438 def dev(self):
435 439 return os.lstat(self.path).st_dev
436 440
437 441 def local(self):
438 442 return True
439 443
440 444 def join(self, f):
441 445 return os.path.join(self.path, f)
442 446
443 447 def sjoin(self, f):
444 448 f = self.encodefn(f)
445 449 return os.path.join(self.spath, f)
446 450
447 451 def wjoin(self, f):
448 452 return os.path.join(self.root, f)
449 453
450 454 def file(self, f):
451 455 if f[0] == '/':
452 456 f = f[1:]
453 457 return filelog.filelog(self.sopener, f, self.revlogversion)
454 458
455 459 def changectx(self, changeid=None):
456 460 return context.changectx(self, changeid)
457 461
458 462 def workingctx(self):
459 463 return context.workingctx(self)
460 464
461 465 def parents(self, changeid=None):
462 466 '''
463 467 get list of changectxs for parents of changeid or working directory
464 468 '''
465 469 if changeid is None:
466 470 pl = self.dirstate.parents()
467 471 else:
468 472 n = self.changelog.lookup(changeid)
469 473 pl = self.changelog.parents(n)
470 474 if pl[1] == nullid:
471 475 return [self.changectx(pl[0])]
472 476 return [self.changectx(pl[0]), self.changectx(pl[1])]
473 477
474 478 def filectx(self, path, changeid=None, fileid=None):
475 479 """changeid can be a changeset revision, node, or tag.
476 480 fileid can be a file revision or node."""
477 481 return context.filectx(self, path, changeid, fileid)
478 482
479 483 def getcwd(self):
480 484 return self.dirstate.getcwd()
481 485
482 486 def wfile(self, f, mode='r'):
483 487 return self.wopener(f, mode)
484 488
485 489 def _filter(self, filter, filename, data):
486 490 if filter not in self.filterpats:
487 491 l = []
488 492 for pat, cmd in self.ui.configitems(filter):
489 493 mf = util.matcher(self.root, "", [pat], [], [])[1]
490 494 l.append((mf, cmd))
491 495 self.filterpats[filter] = l
492 496
493 497 for mf, cmd in self.filterpats[filter]:
494 498 if mf(filename):
495 499 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
496 500 data = util.filter(data, cmd)
497 501 break
498 502
499 503 return data
500 504
501 505 def wread(self, filename):
502 506 if self._link(filename):
503 507 data = os.readlink(self.wjoin(filename))
504 508 else:
505 509 data = self.wopener(filename, 'r').read()
506 510 return self._filter("encode", filename, data)
507 511
508 512 def wwrite(self, filename, data, flags):
509 513 data = self._filter("decode", filename, data)
510 514 if "l" in flags:
511 515 try:
512 516 os.unlink(self.wjoin(filename))
513 517 except OSError:
514 518 pass
515 519 os.symlink(data, self.wjoin(filename))
516 520 else:
517 521 try:
518 522 if self._link(filename):
519 523 os.unlink(self.wjoin(filename))
520 524 except OSError:
521 525 pass
522 526 self.wopener(filename, 'w').write(data)
523 527 util.set_exec(self.wjoin(filename), "x" in flags)
524 528
525 529 def wwritedata(self, filename, data):
526 530 return self._filter("decode", filename, data)
527 531
528 532 def transaction(self):
529 533 tr = self.transhandle
530 534 if tr != None and tr.running():
531 535 return tr.nest()
532 536
533 537 # save dirstate for rollback
534 538 try:
535 539 ds = self.opener("dirstate").read()
536 540 except IOError:
537 541 ds = ""
538 542 self.opener("journal.dirstate", "w").write(ds)
539 543
540 544 renames = [(self.sjoin("journal"), self.sjoin("undo")),
541 545 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
542 546 tr = transaction.transaction(self.ui.warn, self.sopener,
543 547 self.sjoin("journal"),
544 548 aftertrans(renames))
545 549 self.transhandle = tr
546 550 return tr
547 551
548 552 def recover(self):
549 553 l = self.lock()
550 554 if os.path.exists(self.sjoin("journal")):
551 555 self.ui.status(_("rolling back interrupted transaction\n"))
552 556 transaction.rollback(self.sopener, self.sjoin("journal"))
553 557 self.reload()
554 558 return True
555 559 else:
556 560 self.ui.warn(_("no interrupted transaction available\n"))
557 561 return False
558 562
559 563 def rollback(self, wlock=None):
560 564 if not wlock:
561 565 wlock = self.wlock()
562 566 l = self.lock()
563 567 if os.path.exists(self.sjoin("undo")):
564 568 self.ui.status(_("rolling back last transaction\n"))
565 569 transaction.rollback(self.sopener, self.sjoin("undo"))
566 570 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
567 571 self.reload()
568 572 self.wreload()
569 573 else:
570 574 self.ui.warn(_("no rollback information available\n"))
571 575
572 576 def wreload(self):
573 577 self.dirstate.read()
574 578
575 579 def reload(self):
576 580 self.changelog.load()
577 581 self.manifest.load()
578 582 self.tagscache = None
579 583 self.nodetagscache = None
580 584
581 585 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
582 586 desc=None):
583 587 try:
584 588 l = lock.lock(lockname, 0, releasefn, desc=desc)
585 589 except lock.LockHeld, inst:
586 590 if not wait:
587 591 raise
588 592 self.ui.warn(_("waiting for lock on %s held by %r\n") %
589 593 (desc, inst.locker))
590 594 # default to 600 seconds timeout
591 595 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
592 596 releasefn, desc=desc)
593 597 if acquirefn:
594 598 acquirefn()
595 599 return l
596 600
597 601 def lock(self, wait=1):
598 602 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
599 603 desc=_('repository %s') % self.origroot)
600 604
601 605 def wlock(self, wait=1):
602 606 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
603 607 self.wreload,
604 608 desc=_('working directory of %s') % self.origroot)
605 609
606 610 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
607 611 """
608 612 commit an individual file as part of a larger transaction
609 613 """
610 614
611 615 t = self.wread(fn)
612 616 fl = self.file(fn)
613 617 fp1 = manifest1.get(fn, nullid)
614 618 fp2 = manifest2.get(fn, nullid)
615 619
616 620 meta = {}
617 621 cp = self.dirstate.copied(fn)
618 622 if cp:
619 623 # Mark the new revision of this file as a copy of another
620 624 # file. This copy data will effectively act as a parent
621 625 # of this new revision. If this is a merge, the first
622 626 # parent will be the nullid (meaning "look up the copy data")
623 627 # and the second one will be the other parent. For example:
624 628 #
625 629 # 0 --- 1 --- 3 rev1 changes file foo
626 630 # \ / rev2 renames foo to bar and changes it
627 631 # \- 2 -/ rev3 should have bar with all changes and
628 632 # should record that bar descends from
629 633 # bar in rev2 and foo in rev1
630 634 #
631 635 # this allows this merge to succeed:
632 636 #
633 637 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
634 638 # \ / merging rev3 and rev4 should use bar@rev2
635 639 # \- 2 --- 4 as the merge base
636 640 #
637 641 meta["copy"] = cp
638 642 if not manifest2: # not a branch merge
639 643 meta["copyrev"] = hex(manifest1.get(cp, nullid))
640 644 fp2 = nullid
641 645 elif fp2 != nullid: # copied on remote side
642 646 meta["copyrev"] = hex(manifest1.get(cp, nullid))
643 647 elif fp1 != nullid: # copied on local side, reversed
644 648 meta["copyrev"] = hex(manifest2.get(cp))
645 649 fp2 = fp1
646 650 else: # directory rename
647 651 meta["copyrev"] = hex(manifest1.get(cp, nullid))
648 652 self.ui.debug(_(" %s: copy %s:%s\n") %
649 653 (fn, cp, meta["copyrev"]))
650 654 fp1 = nullid
651 655 elif fp2 != nullid:
652 656 # is one parent an ancestor of the other?
653 657 fpa = fl.ancestor(fp1, fp2)
654 658 if fpa == fp1:
655 659 fp1, fp2 = fp2, nullid
656 660 elif fpa == fp2:
657 661 fp2 = nullid
658 662
659 663 # is the file unmodified from the parent? report existing entry
660 664 if fp2 == nullid and not fl.cmp(fp1, t):
661 665 return fp1
662 666
663 667 changelist.append(fn)
664 668 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
665 669
666 670 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
667 671 if p1 is None:
668 672 p1, p2 = self.dirstate.parents()
669 673 return self.commit(files=files, text=text, user=user, date=date,
670 674 p1=p1, p2=p2, wlock=wlock, extra=extra)
671 675
672 676 def commit(self, files=None, text="", user=None, date=None,
673 677 match=util.always, force=False, lock=None, wlock=None,
674 678 force_editor=False, p1=None, p2=None, extra={}):
675 679
676 680 commit = []
677 681 remove = []
678 682 changed = []
679 683 use_dirstate = (p1 is None) # not rawcommit
680 684 extra = extra.copy()
681 685
682 686 if use_dirstate:
683 687 if files:
684 688 for f in files:
685 689 s = self.dirstate.state(f)
686 690 if s in 'nmai':
687 691 commit.append(f)
688 692 elif s == 'r':
689 693 remove.append(f)
690 694 else:
691 695 self.ui.warn(_("%s not tracked!\n") % f)
692 696 else:
693 697 changes = self.status(match=match)[:5]
694 698 modified, added, removed, deleted, unknown = changes
695 699 commit = modified + added
696 700 remove = removed
697 701 else:
698 702 commit = files
699 703
700 704 if use_dirstate:
701 705 p1, p2 = self.dirstate.parents()
702 706 update_dirstate = True
703 707 else:
704 708 p1, p2 = p1, p2 or nullid
705 709 update_dirstate = (self.dirstate.parents()[0] == p1)
706 710
707 711 c1 = self.changelog.read(p1)
708 712 c2 = self.changelog.read(p2)
709 713 m1 = self.manifest.read(c1[0]).copy()
710 714 m2 = self.manifest.read(c2[0])
711 715
712 716 if use_dirstate:
713 717 branchname = self.workingctx().branch()
714 718 try:
715 719 branchname = branchname.decode('UTF-8').encode('UTF-8')
716 720 except UnicodeDecodeError:
717 721 raise util.Abort(_('branch name not in UTF-8!'))
718 722 else:
719 723 branchname = ""
720 724
721 725 if use_dirstate:
722 726 oldname = c1[5].get("branch", "") # stored in UTF-8
723 727 if not commit and not remove and not force and p2 == nullid and \
724 728 branchname == oldname:
725 729 self.ui.status(_("nothing changed\n"))
726 730 return None
727 731
728 732 xp1 = hex(p1)
729 733 if p2 == nullid: xp2 = ''
730 734 else: xp2 = hex(p2)
731 735
732 736 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
733 737
734 738 if not wlock:
735 739 wlock = self.wlock()
736 740 if not lock:
737 741 lock = self.lock()
738 742 tr = self.transaction()
739 743
740 744 # check in files
741 745 new = {}
742 746 linkrev = self.changelog.count()
743 747 commit.sort()
744 748 is_exec = util.execfunc(self.root, m1.execf)
745 749 is_link = util.linkfunc(self.root, m1.linkf)
746 750 for f in commit:
747 751 self.ui.note(f + "\n")
748 752 try:
749 753 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
750 754 m1.set(f, is_exec(f), is_link(f))
751 755 except (OSError, IOError):
752 756 if use_dirstate:
753 757 self.ui.warn(_("trouble committing %s!\n") % f)
754 758 raise
755 759 else:
756 760 remove.append(f)
757 761
758 762 # update manifest
759 763 m1.update(new)
760 764 remove.sort()
761 765 removed = []
762 766
763 767 for f in remove:
764 768 if f in m1:
765 769 del m1[f]
766 770 removed.append(f)
767 771 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
768 772
769 773 # add changeset
770 774 new = new.keys()
771 775 new.sort()
772 776
773 777 user = user or self.ui.username()
774 778 if not text or force_editor:
775 779 edittext = []
776 780 if text:
777 781 edittext.append(text)
778 782 edittext.append("")
779 783 edittext.append("HG: user: %s" % user)
780 784 if p2 != nullid:
781 785 edittext.append("HG: branch merge")
782 786 if branchname:
783 787 edittext.append("HG: branch %s" % util.tolocal(branchname))
784 788 edittext.extend(["HG: changed %s" % f for f in changed])
785 789 edittext.extend(["HG: removed %s" % f for f in removed])
786 790 if not changed and not remove:
787 791 edittext.append("HG: no files changed")
788 792 edittext.append("")
789 793 # run editor in the repository root
790 794 olddir = os.getcwd()
791 795 os.chdir(self.root)
792 796 text = self.ui.edit("\n".join(edittext), user)
793 797 os.chdir(olddir)
794 798
795 799 lines = [line.rstrip() for line in text.rstrip().splitlines()]
796 800 while lines and not lines[0]:
797 801 del lines[0]
798 802 if not lines:
799 803 return None
800 804 text = '\n'.join(lines)
801 805 if branchname:
802 806 extra["branch"] = branchname
803 807 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
804 808 user, date, extra)
805 809 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
806 810 parent2=xp2)
807 811 tr.close()
808 812
809 813 if self.branchcache and "branch" in extra:
810 814 self.branchcache[util.tolocal(extra["branch"])] = n
811 815
812 816 if use_dirstate or update_dirstate:
813 817 self.dirstate.setparents(n)
814 818 if use_dirstate:
815 819 self.dirstate.update(new, "n")
816 820 self.dirstate.forget(removed)
817 821
818 822 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
819 823 return n
820 824
821 825 def walk(self, node=None, files=[], match=util.always, badmatch=None):
822 826 '''
823 827 walk recursively through the directory tree or a given
824 828 changeset, finding all files matched by the match
825 829 function
826 830
827 831 results are yielded in a tuple (src, filename), where src
828 832 is one of:
829 833 'f' the file was found in the directory tree
830 834 'm' the file was only in the dirstate and not in the tree
831 835 'b' file was not found and matched badmatch
832 836 '''
833 837
834 838 if node:
835 839 fdict = dict.fromkeys(files)
836 840 for fn in self.manifest.read(self.changelog.read(node)[0]):
837 841 for ffn in fdict:
838 842 # match if the file is the exact name or a directory
839 843 if ffn == fn or fn.startswith("%s/" % ffn):
840 844 del fdict[ffn]
841 845 break
842 846 if match(fn):
843 847 yield 'm', fn
844 848 for fn in fdict:
845 849 if badmatch and badmatch(fn):
846 850 if match(fn):
847 851 yield 'b', fn
848 852 else:
849 853 self.ui.warn(_('%s: No such file in rev %s\n') % (
850 854 util.pathto(self.getcwd(), fn), short(node)))
851 855 else:
852 856 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
853 857 yield src, fn
854 858
855 859 def status(self, node1=None, node2=None, files=[], match=util.always,
856 860 wlock=None, list_ignored=False, list_clean=False):
857 861 """return status of files between two nodes or node and working directory
858 862
859 863 If node1 is None, use the first dirstate parent instead.
860 864 If node2 is None, compare node1 with working directory.
861 865 """
862 866
863 867 def fcmp(fn, mf):
864 868 t1 = self.wread(fn)
865 869 return self.file(fn).cmp(mf.get(fn, nullid), t1)
866 870
867 871 def mfmatches(node):
868 872 change = self.changelog.read(node)
869 873 mf = self.manifest.read(change[0]).copy()
870 874 for fn in mf.keys():
871 875 if not match(fn):
872 876 del mf[fn]
873 877 return mf
874 878
875 879 modified, added, removed, deleted, unknown = [], [], [], [], []
876 880 ignored, clean = [], []
877 881
878 882 compareworking = False
879 883 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
880 884 compareworking = True
881 885
882 886 if not compareworking:
883 887 # read the manifest from node1 before the manifest from node2,
884 888 # so that we'll hit the manifest cache if we're going through
885 889 # all the revisions in parent->child order.
886 890 mf1 = mfmatches(node1)
887 891
888 892 # are we comparing the working directory?
889 893 if not node2:
890 894 if not wlock:
891 895 try:
892 896 wlock = self.wlock(wait=0)
893 897 except lock.LockException:
894 898 wlock = None
895 899 (lookup, modified, added, removed, deleted, unknown,
896 900 ignored, clean) = self.dirstate.status(files, match,
897 901 list_ignored, list_clean)
898 902
899 903 # are we comparing working dir against its parent?
900 904 if compareworking:
901 905 if lookup:
902 906 # do a full compare of any files that might have changed
903 907 mf2 = mfmatches(self.dirstate.parents()[0])
904 908 for f in lookup:
905 909 if fcmp(f, mf2):
906 910 modified.append(f)
907 911 else:
908 912 clean.append(f)
909 913 if wlock is not None:
910 914 self.dirstate.update([f], "n")
911 915 else:
912 916 # we are comparing working dir against non-parent
913 917 # generate a pseudo-manifest for the working dir
914 918 # XXX: create it in dirstate.py ?
915 919 mf2 = mfmatches(self.dirstate.parents()[0])
916 920 is_exec = util.execfunc(self.root, mf2.execf)
917 921 is_link = util.linkfunc(self.root, mf2.linkf)
918 922 for f in lookup + modified + added:
919 923 mf2[f] = ""
920 924 mf2.set(f, is_exec(f), is_link(f))
921 925 for f in removed:
922 926 if f in mf2:
923 927 del mf2[f]
924 928 else:
925 929 # we are comparing two revisions
926 930 mf2 = mfmatches(node2)
927 931
928 932 if not compareworking:
929 933 # flush lists from dirstate before comparing manifests
930 934 modified, added, clean = [], [], []
931 935
932 936 # make sure to sort the files so we talk to the disk in a
933 937 # reasonable order
934 938 mf2keys = mf2.keys()
935 939 mf2keys.sort()
936 940 for fn in mf2keys:
937 941 if mf1.has_key(fn):
938 942 if mf1.flags(fn) != mf2.flags(fn) or \
939 943 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
940 944 modified.append(fn)
941 945 elif list_clean:
942 946 clean.append(fn)
943 947 del mf1[fn]
944 948 else:
945 949 added.append(fn)
946 950
947 951 removed = mf1.keys()
948 952
949 953 # sort and return results:
950 954 for l in modified, added, removed, deleted, unknown, ignored, clean:
951 955 l.sort()
952 956 return (modified, added, removed, deleted, unknown, ignored, clean)
953 957
954 958 def add(self, list, wlock=None):
955 959 if not wlock:
956 960 wlock = self.wlock()
957 961 for f in list:
958 962 p = self.wjoin(f)
959 963 islink = os.path.islink(p)
960 964 if not islink and not os.path.exists(p):
961 965 self.ui.warn(_("%s does not exist!\n") % f)
962 966 elif not islink and not os.path.isfile(p):
963 967 self.ui.warn(_("%s not added: only files and symlinks "
964 968 "supported currently\n") % f)
965 969 elif self.dirstate.state(f) in 'an':
966 970 self.ui.warn(_("%s already tracked!\n") % f)
967 971 else:
968 972 self.dirstate.update([f], "a")
969 973
970 974 def forget(self, list, wlock=None):
971 975 if not wlock:
972 976 wlock = self.wlock()
973 977 for f in list:
974 978 if self.dirstate.state(f) not in 'ai':
975 979 self.ui.warn(_("%s not added!\n") % f)
976 980 else:
977 981 self.dirstate.forget([f])
978 982
    def remove(self, list, unlink=False, wlock=None):
        # Stop tracking the files in list; with unlink=True, also delete
        # them from the working directory first.
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # a file that is already gone is fine; any other
                    # error (permissions, etc.) is real and propagates
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # refuse to mark a file removed while it still exists
                # (only reachable with unlink=False or a failed unlink)
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # added but never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                # mark as removed so the next commit records the deletion
                self.dirstate.update([f], "r")
999 1003
1000 1004 def undelete(self, list, wlock=None):
1001 1005 p = self.dirstate.parents()[0]
1002 1006 mn = self.changelog.read(p)[0]
1003 1007 m = self.manifest.read(mn)
1004 1008 if not wlock:
1005 1009 wlock = self.wlock()
1006 1010 for f in list:
1007 1011 if self.dirstate.state(f) not in "r":
1008 1012 self.ui.warn("%s not removed!\n" % f)
1009 1013 else:
1010 1014 t = self.file(f).read(m[f])
1011 1015 self.wwrite(f, t, m.flags(f))
1012 1016 self.dirstate.update([f], "n")
1013 1017
1014 1018 def copy(self, source, dest, wlock=None):
1015 1019 p = self.wjoin(dest)
1016 1020 if not os.path.exists(p):
1017 1021 self.ui.warn(_("%s does not exist!\n") % dest)
1018 1022 elif not os.path.isfile(p):
1019 1023 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1020 1024 else:
1021 1025 if not wlock:
1022 1026 wlock = self.wlock()
1023 1027 if self.dirstate.state(dest) == '?':
1024 1028 self.dirstate.update([dest], "a")
1025 1029 self.dirstate.copy(source, dest)
1026 1030
1027 1031 def heads(self, start=None):
1028 1032 heads = self.changelog.heads(start)
1029 1033 # sort the output in rev descending order
1030 1034 heads = [(-self.changelog.rev(h), h) for h in heads]
1031 1035 heads.sort()
1032 1036 return [n for (r, n) in heads]
1033 1037
1034 1038 def branches(self, nodes):
1035 1039 if not nodes:
1036 1040 nodes = [self.changelog.tip()]
1037 1041 b = []
1038 1042 for n in nodes:
1039 1043 t = n
1040 1044 while 1:
1041 1045 p = self.changelog.parents(n)
1042 1046 if p[1] != nullid or p[0] == nullid:
1043 1047 b.append((t, n, p[0], p[1]))
1044 1048 break
1045 1049 n = p[0]
1046 1050 return b
1047 1051
1048 1052 def between(self, pairs):
1049 1053 r = []
1050 1054
1051 1055 for top, bottom in pairs:
1052 1056 n, l, i = top, [], 0
1053 1057 f = 1
1054 1058
1055 1059 while n != bottom:
1056 1060 p = self.changelog.parents(n)[0]
1057 1061 if i == f:
1058 1062 l.append(n)
1059 1063 f = f * 2
1060 1064 n = p
1061 1065 i += 1
1062 1066
1063 1067 r.append(l)
1064 1068
1065 1069 return r
1066 1070
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # nodemap gives O(1) "do we have this node locally?" tests
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # empty local repo: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        # split the remote heads into ones we already have (become part
        # of base) and ones we need to investigate
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req tracks nodes we have already asked the remote about
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # branch root's parents are both known: the
                            # root is the earliest unknown changeset
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next batched request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch branch requests ten nodes at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap between known and unknown is <= 2: p is
                        # the first missing changeset on this branch
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # keep bisecting the (p, i) sub-range
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # nothing in common with the remote at all
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1207 1211
1208 1212 def findoutgoing(self, remote, base=None, heads=None, force=False):
1209 1213 """Return list of nodes that are roots of subsets not in remote
1210 1214
1211 1215 If base dict is specified, assume that these nodes and their parents
1212 1216 exist on the remote side.
1213 1217 If a list of heads is specified, return only nodes which are heads
1214 1218 or ancestors of these heads, and return a second element which
1215 1219 contains all remote heads which get new children.
1216 1220 """
1217 1221 if base == None:
1218 1222 base = {}
1219 1223 self.findincoming(remote, base, heads, force=force)
1220 1224
1221 1225 self.ui.debug(_("common changesets up to ")
1222 1226 + " ".join(map(short, base.keys())) + "\n")
1223 1227
1224 1228 remain = dict.fromkeys(self.changelog.nodemap)
1225 1229
1226 1230 # prune everything remote has from the tree
1227 1231 del remain[nullid]
1228 1232 remove = base.keys()
1229 1233 while remove:
1230 1234 n = remove.pop(0)
1231 1235 if n in remain:
1232 1236 del remain[n]
1233 1237 for p in self.changelog.parents(n):
1234 1238 remove.append(p)
1235 1239
1236 1240 # find every node whose parents have been pruned
1237 1241 subset = []
1238 1242 # find every remote head that will get new children
1239 1243 updated_heads = {}
1240 1244 for n in remain:
1241 1245 p1, p2 = self.changelog.parents(n)
1242 1246 if p1 not in remain and p2 not in remain:
1243 1247 subset.append(n)
1244 1248 if heads:
1245 1249 if p1 in heads:
1246 1250 updated_heads[p1] = True
1247 1251 if p2 in heads:
1248 1252 updated_heads[p2] = True
1249 1253
1250 1254 # this is the set of all roots we have to push
1251 1255 if heads:
1252 1256 return subset, updated_heads.keys()
1253 1257 else:
1254 1258 return subset
1255 1259
1256 1260 def pull(self, remote, heads=None, force=False, lock=None):
1257 1261 mylock = False
1258 1262 if not lock:
1259 1263 lock = self.lock()
1260 1264 mylock = True
1261 1265
1262 1266 try:
1263 1267 fetch = self.findincoming(remote, force=force)
1264 1268 if fetch == [nullid]:
1265 1269 self.ui.status(_("requesting all changes\n"))
1266 1270
1267 1271 if not fetch:
1268 1272 self.ui.status(_("no changes found\n"))
1269 1273 return 0
1270 1274
1271 1275 if heads is None:
1272 1276 cg = remote.changegroup(fetch, 'pull')
1273 1277 else:
1274 1278 if 'changegroupsubset' not in remote.capabilities:
1275 1279 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1276 1280 cg = remote.changegroupsubset(fetch, heads, 'pull')
1277 1281 return self.addchangegroup(cg, 'pull', remote.url())
1278 1282 finally:
1279 1283 if mylock:
1280 1284 lock.release()
1281 1285
1282 1286 def push(self, remote, force=False, revs=None):
1283 1287 # there are two ways to push to remote repo:
1284 1288 #
1285 1289 # addchangegroup assumes local user can lock remote
1286 1290 # repo (local filesystem, old ssh servers).
1287 1291 #
1288 1292 # unbundle assumes local user cannot lock remote repo (new ssh
1289 1293 # servers, http servers).
1290 1294
1291 1295 if remote.capable('unbundle'):
1292 1296 return self.push_unbundle(remote, force, revs)
1293 1297 return self.push_addchangegroup(remote, force, revs)
1294 1298
    def prepush(self, remote, force, revs):
        """Analyse local and remote repos and decide what to push.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, status) when there is nothing to push or the
        push would create new remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        # inc is non-empty if the remote has changes we don't
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # limit the outgoing set to ancestors of the requested revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # pushing into an empty repo can never add heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head survives only if no outgoing head
                        # descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        # unknown remote head: assume it stays a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1350 1354
1351 1355 def push_addchangegroup(self, remote, force, revs):
1352 1356 lock = remote.lock()
1353 1357
1354 1358 ret = self.prepush(remote, force, revs)
1355 1359 if ret[0] is not None:
1356 1360 cg, remote_heads = ret
1357 1361 return remote.addchangegroup(cg, 'push', self.url())
1358 1362 return ret[1]
1359 1363
1360 1364 def push_unbundle(self, remote, force, revs):
1361 1365 # local repo finds heads on server, finds out what revs it
1362 1366 # must push. once revs transferred, if server finds it has
1363 1367 # different heads (someone else won commit/push race), server
1364 1368 # aborts.
1365 1369
1366 1370 ret = self.prepush(remote, force, revs)
1367 1371 if ret[0] is not None:
1368 1372 cg, remote_heads = ret
1369 1373 if force: remote_heads = ['force']
1370 1374 return remote.unbundle(cg, remote_heads, 'push')
1371 1375 return ret[1]
1372 1376
1373 1377 def changegroupinfo(self, nodes):
1374 1378 self.ui.note(_("%d changesets found\n") % len(nodes))
1375 1379 if self.ui.debugflag:
1376 1380 self.ui.debug(_("List of changesets:\n"))
1377 1381 for node in nodes:
1378 1382 self.ui.debug("%s\n" % hex(node))
1379 1383
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer wrapping the generated group."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1651 1655
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the generated group."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every descendant of basenodes goes out
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revs being sent, for linkrev tests
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changeset 'owns' itself, so its lookup function is identity
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is outgoing,
        # in revision (storage) order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # record every file touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a manifest/file node to its owning changeset node
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog chunks first; collects changedfiles as a side effect
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file, each preceded by its name
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1718 1722
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: stream of changegroup chunks (changelog group, then
                manifest group, then one group per file)
        srctype: string describing where the group came from (e.g. a
                 pull); passed through to the hooks as HG_SOURCE
        url: location the group came from; passed to hooks as HG_URL

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # linkrev lookup for changelog chunks: every new changeset links
        # to itself, i.e. to the next index in the (growing) changelog
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # linkrev lookup for manifest/file chunks: map a changeset node
        # to its revision number in the changelog
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        # throw=True: a failing prechangegroup hook aborts before any
        # data is read from the stream
        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog tip revision before/after adding the group
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files: each file group is preceded by a chunk
            # holding the filename; an empty chunk terminates the stream
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # publish the buffered changelog data
            cl.writedata()
        finally:
            # always remove the appendfile temp files, even on abort
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # runs inside the open transaction: a failing hook (throw=True)
            # rolls the whole changegroup back
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # post-transaction notification hooks: one 'changegroup' for the
            # batch, one 'incoming' per added changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1825 1829
1826 1830
1827 1831 def stream_in(self, remote):
1828 1832 fp = remote.stream_out()
1829 1833 l = fp.readline()
1830 1834 try:
1831 1835 resp = int(l)
1832 1836 except ValueError:
1833 1837 raise util.UnexpectedOutput(
1834 1838 _('Unexpected response from remote server:'), l)
1835 1839 if resp == 1:
1836 1840 raise util.Abort(_('operation forbidden by server'))
1837 1841 elif resp == 2:
1838 1842 raise util.Abort(_('locking the remote repository failed'))
1839 1843 elif resp != 0:
1840 1844 raise util.Abort(_('the server sent an unknown error code'))
1841 1845 self.ui.status(_('streaming all changes\n'))
1842 1846 l = fp.readline()
1843 1847 try:
1844 1848 total_files, total_bytes = map(int, l.split(' ', 1))
1845 1849 except ValueError, TypeError:
1846 1850 raise util.UnexpectedOutput(
1847 1851 _('Unexpected response from remote server:'), l)
1848 1852 self.ui.status(_('%d files to transfer, %s of data\n') %
1849 1853 (total_files, util.bytecount(total_bytes)))
1850 1854 start = time.time()
1851 1855 for i in xrange(total_files):
1852 1856 # XXX doesn't support '\n' or '\r' in filenames
1853 1857 l = fp.readline()
1854 1858 try:
1855 1859 name, size = l.split('\0', 1)
1856 1860 size = int(size)
1857 1861 except ValueError, TypeError:
1858 1862 raise util.UnexpectedOutput(
1859 1863 _('Unexpected response from remote server:'), l)
1860 1864 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1861 1865 ofp = self.sopener(name, 'w')
1862 1866 for chunk in util.filechunkiter(fp, limit=size):
1863 1867 ofp.write(chunk)
1864 1868 ofp.close()
1865 1869 elapsed = time.time() - start
1866 1870 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1867 1871 (util.bytecount(total_bytes), elapsed,
1868 1872 util.bytecount(total_bytes / elapsed)))
1869 1873 self.reload()
1870 1874 return len(self.heads()) + 1
1871 1875
1872 1876 def clone(self, remote, heads=[], stream=False):
1873 1877 '''clone remote repository.
1874 1878
1875 1879 keyword arguments:
1876 1880 heads: list of revs to clone (forces use of pull)
1877 1881 stream: use streaming clone if possible'''
1878 1882
1879 1883 # now, all clients that can request uncompressed clones can
1880 1884 # read repo formats supported by all servers that can serve
1881 1885 # them.
1882 1886
1883 1887 # if revlog format changes, client will have to check version
1884 1888 # and format flags on "stream" capability, and use
1885 1889 # uncompressed only if compatible.
1886 1890
1887 1891 if stream and not heads and remote.capable('stream'):
1888 1892 return self.stream_in(remote)
1889 1893 return self.pull(remote, heads)
1890 1894
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback renaming each (src, dest) pair in *files*.

    The pairs are snapshotted into plain tuples up front so the closure
    keeps no reference to the caller's objects.
    """
    pending = [tuple(pair) for pair in files]
    def renameall():
        for source, target in pending:
            util.rename(source, target)
    return renameall
1898 1902
def instance(ui, path, create):
    """Open (or create, if *create* is true) a localrepository at *path*.

    A leading 'file:' scheme is stripped from *path* before use.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1901 1905
def islocal(path):
    """Report whether this repository type is local; it always is."""
    # the path is irrelevant: a localrepository is local by definition
    return True
@@ -1,186 +1,206 b''
1 1 #!/bin/sh
2 2
3 3 # commit hooks can see env vars
4 4 hg init a
5 5 cd a
6 6 echo "[hooks]" > .hg/hgrc
7 7 echo 'commit = echo commit hook: n=$HG_NODE p1=$HG_PARENT1 p2=$HG_PARENT2' >> .hg/hgrc
8 8 echo 'commit.b = echo commit hook b' >> .hg/hgrc
9 9 echo 'precommit = echo precommit hook: p1=$HG_PARENT1 p2=$HG_PARENT2' >> .hg/hgrc
10 10 echo 'pretxncommit = echo pretxncommit hook: n=$HG_NODE p1=$HG_PARENT1 p2=$HG_PARENT2; hg -q tip' >> .hg/hgrc
11 11 echo a > a
12 12 hg add a
13 13 hg commit -m a -d "1000000 0"
14 14
15 15 hg clone . ../b
16 16 cd ../b
17 17
18 18 # changegroup hooks can see env vars
19 19 echo '[hooks]' > .hg/hgrc
20 20 echo 'prechangegroup = echo prechangegroup hook: u=`echo $HG_URL | sed s,file:.*,file:,`' >> .hg/hgrc
21 21 echo 'changegroup = echo changegroup hook: n=$HG_NODE u=`echo $HG_URL | sed s,file:.*,file:,`' >> .hg/hgrc
22 22 echo 'incoming = echo incoming hook: n=$HG_NODE u=`echo $HG_URL | sed s,file:.*,file:,`' >> .hg/hgrc
23 23
24 24 # pretxncommit and commit hooks can see both parents of merge
25 25 cd ../a
26 26 echo b >> a
27 27 hg commit -m a1 -d "1 0"
28 28 hg update -C 0
29 29 echo b > b
30 30 hg add b
31 31 hg commit -m b -d '1 0'
32 32 hg merge 1
33 33 hg commit -m merge -d '2 0'
34 34
35 35 cd ../b
36 36 hg pull ../a
37 37
38 38 # tag hooks can see env vars
39 39 cd ../a
40 40 echo 'pretag = echo pretag hook: t=$HG_TAG n=$HG_NODE l=$HG_LOCAL' >> .hg/hgrc
41 41 echo 'tag = echo tag hook: t=$HG_TAG n=$HG_NODE l=$HG_LOCAL' >> .hg/hgrc
42 42 hg tag -d '3 0' a
43 43 hg tag -l la
44 44
45 45 # pretag hook can forbid tagging
46 46 echo 'pretag.forbid = echo pretag.forbid hook; exit 1' >> .hg/hgrc
47 47 hg tag -d '4 0' fa
48 48 hg tag -l fla
49 49
50 50 # pretxncommit hook can see changeset, can roll back txn, changeset
51 51 # no more there after
52 52 echo 'pretxncommit.forbid = echo pretxncommit.forbid hook: tip=`hg -q tip`; exit 1' >> .hg/hgrc
53 53 echo z > z
54 54 hg add z
55 55 hg -q tip
56 56 hg commit -m 'fail' -d '4 0'
57 57 hg -q tip
58 58
59 59 # precommit hook can prevent commit
60 60 echo 'precommit.forbid = echo precommit.forbid hook; exit 1' >> .hg/hgrc
61 61 hg commit -m 'fail' -d '4 0'
62 62 hg -q tip
63 63
64 64 # preupdate hook can prevent update
65 65 echo 'preupdate = echo preupdate hook: p1=$HG_PARENT1 p2=$HG_PARENT2' >> .hg/hgrc
66 66 hg update 1
67 67
68 68 # update hook
69 69 echo 'update = echo update hook: p1=$HG_PARENT1 p2=$HG_PARENT2 err=$HG_ERROR' >> .hg/hgrc
70 70 hg update
71 71
72 72 # prechangegroup hook can prevent incoming changes
73 73 cd ../b
74 74 hg -q tip
75 75 echo '[hooks]' > .hg/hgrc
76 76 echo 'prechangegroup.forbid = echo prechangegroup.forbid hook; exit 1' >> .hg/hgrc
77 77 hg pull ../a
78 78
79 79 # pretxnchangegroup hook can see incoming changes, can roll back txn,
80 80 # incoming changes no longer there after
81 81 echo '[hooks]' > .hg/hgrc
82 82 echo 'pretxnchangegroup.forbid = echo pretxnchangegroup.forbid hook: tip=`hg -q tip`; exit 1' >> .hg/hgrc
83 83 hg pull ../a
84 84 hg -q tip
85 85
86 86 # outgoing hooks can see env vars
87 87 rm .hg/hgrc
88 88 echo '[hooks]' > ../a/.hg/hgrc
89 89 echo 'preoutgoing = echo preoutgoing hook: s=$HG_SOURCE' >> ../a/.hg/hgrc
90 90 echo 'outgoing = echo outgoing hook: n=$HG_NODE s=$HG_SOURCE' >> ../a/.hg/hgrc
91 91 hg pull ../a
92 92 hg rollback
93 93
94 94 # preoutgoing hook can prevent outgoing changes
95 95 echo 'preoutgoing.forbid = echo preoutgoing.forbid hook; exit 1' >> ../a/.hg/hgrc
96 96 hg pull ../a
97 97
98 98 cat > hooktests.py <<EOF
99 99 from mercurial import util
100 100
101 101 uncallable = 0
102 102
103 103 def printargs(args):
104 104 args.pop('ui', None)
105 105 args.pop('repo', None)
106 106 a = list(args.items())
107 107 a.sort()
108 108 print 'hook args:'
109 109 for k, v in a:
110 110 print ' ', k, v
111 111
112 112 def passhook(**args):
113 113 printargs(args)
114 114
115 115 def failhook(**args):
116 116 printargs(args)
117 117 return True
118 118
119 119 class LocalException(Exception):
120 120 pass
121 121
122 122 def raisehook(**args):
123 123 raise LocalException('exception from hook')
124 124
125 125 def aborthook(**args):
126 126 raise util.Abort('raise abort from hook')
127 127
128 128 def brokenhook(**args):
129 129 return 1 + {}
130 130
131 131 class container:
132 132 unreachable = 1
133 133 EOF
134 134
135 135 echo '# test python hooks'
136 136 PYTHONPATH="`pwd`:$PYTHONPATH"
137 137 export PYTHONPATH
138 138
139 139 echo '[hooks]' > ../a/.hg/hgrc
140 140 echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
141 141 hg pull ../a 2>&1 | grep 'raised an exception'
142 142
143 143 echo '[hooks]' > ../a/.hg/hgrc
144 144 echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
145 145 hg pull ../a 2>&1 | grep 'raised an exception'
146 146
147 147 echo '[hooks]' > ../a/.hg/hgrc
148 148 echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
149 149 hg pull ../a
150 150
151 151 echo '[hooks]' > ../a/.hg/hgrc
152 152 echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
153 153 hg pull ../a
154 154
155 155 echo '[hooks]' > ../a/.hg/hgrc
156 156 echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
157 157 hg pull ../a
158 158
159 159 echo '[hooks]' > ../a/.hg/hgrc
160 160 echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
161 161 hg pull ../a
162 162
163 163 echo '[hooks]' > ../a/.hg/hgrc
164 164 echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
165 165 hg pull ../a
166 166
167 167 echo '[hooks]' > ../a/.hg/hgrc
168 168 echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
169 169 hg pull ../a
170 170
171 171 echo '[hooks]' > ../a/.hg/hgrc
172 172 echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
173 173 hg pull ../a
174 174
175 175 echo '[hooks]' > ../a/.hg/hgrc
176 176 echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
177 177 hg pull ../a
178 178
179 179 echo '# make sure --traceback works'
180 180 echo '[hooks]' > .hg/hgrc
181 181 echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
182 182
183 183 echo a >> a
184 184 hg --traceback commit -A -m a 2>&1 | grep '^Traceback'
185 185
186 cd ..
187 hg init c
188 cd c
189
190 cat > hookext.py <<EOF
191 def autohook(**args):
192 print "Automatically installed hook"
193
194 def reposetup(ui, repo):
195 repo.ui.setconfig("hooks", "commit.auto", autohook)
196 EOF
197 echo '[extensions]' >> .hg/hgrc
198 echo 'hookext = hookext.py' >> .hg/hgrc
199
200 touch foo
201 hg add foo
202 hg ci -m 'add foo'
203 echo >> foo
204 hg ci --debug -m 'change foo' | sed -e 's/ at .*>/>/'
205
186 206 exit 0
@@ -1,140 +1,144 b''
1 1 precommit hook: p1=0000000000000000000000000000000000000000 p2=
2 2 pretxncommit hook: n=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b p1=0000000000000000000000000000000000000000 p2=
3 3 0:29b62aeb769f
4 4 commit hook: n=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b p1=0000000000000000000000000000000000000000 p2=
5 5 commit hook b
6 6 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
7 7 precommit hook: p1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b p2=
8 8 pretxncommit hook: n=b702efe9688826e3a91283852b328b84dbf37bc2 p1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b p2=
9 9 1:b702efe96888
10 10 commit hook: n=b702efe9688826e3a91283852b328b84dbf37bc2 p1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b p2=
11 11 commit hook b
12 12 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
13 13 precommit hook: p1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b p2=
14 14 pretxncommit hook: n=1324a5531bac09b329c3845d35ae6a7526874edb p1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b p2=
15 15 2:1324a5531bac
16 16 commit hook: n=1324a5531bac09b329c3845d35ae6a7526874edb p1=29b62aeb769fdf78d8d9c5f28b017f76d7ef824b p2=
17 17 commit hook b
18 18 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 19 (branch merge, don't forget to commit)
20 20 precommit hook: p1=1324a5531bac09b329c3845d35ae6a7526874edb p2=b702efe9688826e3a91283852b328b84dbf37bc2
21 21 pretxncommit hook: n=4c52fb2e402287dd5dc052090682536c8406c321 p1=1324a5531bac09b329c3845d35ae6a7526874edb p2=b702efe9688826e3a91283852b328b84dbf37bc2
22 22 3:4c52fb2e4022
23 23 commit hook: n=4c52fb2e402287dd5dc052090682536c8406c321 p1=1324a5531bac09b329c3845d35ae6a7526874edb p2=b702efe9688826e3a91283852b328b84dbf37bc2
24 24 commit hook b
25 25 prechangegroup hook: u=file:
26 26 changegroup hook: n=b702efe9688826e3a91283852b328b84dbf37bc2 u=file:
27 27 incoming hook: n=b702efe9688826e3a91283852b328b84dbf37bc2 u=file:
28 28 incoming hook: n=1324a5531bac09b329c3845d35ae6a7526874edb u=file:
29 29 incoming hook: n=4c52fb2e402287dd5dc052090682536c8406c321 u=file:
30 30 pulling from ../a
31 31 searching for changes
32 32 adding changesets
33 33 adding manifests
34 34 adding file changes
35 35 added 3 changesets with 2 changes to 2 files
36 36 (run 'hg update' to get a working copy)
37 37 pretag hook: t=a n=4c52fb2e402287dd5dc052090682536c8406c321 l=0
38 38 precommit hook: p1=4c52fb2e402287dd5dc052090682536c8406c321 p2=
39 39 pretxncommit hook: n=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 p1=4c52fb2e402287dd5dc052090682536c8406c321 p2=
40 40 4:8ea2ef7ad3e8
41 41 commit hook: n=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 p1=4c52fb2e402287dd5dc052090682536c8406c321 p2=
42 42 commit hook b
43 43 tag hook: t=a n=4c52fb2e402287dd5dc052090682536c8406c321 l=0
44 44 pretag hook: t=la n=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 l=1
45 45 tag hook: t=la n=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 l=1
46 46 pretag hook: t=fa n=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 l=0
47 47 pretag.forbid hook
48 48 abort: pretag.forbid hook exited with status 1
49 49 pretag hook: t=fla n=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 l=1
50 50 pretag.forbid hook
51 51 abort: pretag.forbid hook exited with status 1
52 52 4:8ea2ef7ad3e8
53 53 precommit hook: p1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 p2=
54 54 pretxncommit hook: n=fad284daf8c032148abaffcd745dafeceefceb61 p1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 p2=
55 55 5:fad284daf8c0
56 56 pretxncommit.forbid hook: tip=5:fad284daf8c0
57 57 abort: pretxncommit.forbid hook exited with status 1
58 58 transaction abort!
59 59 rollback completed
60 60 4:8ea2ef7ad3e8
61 61 precommit hook: p1=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 p2=
62 62 precommit.forbid hook
63 63 abort: precommit.forbid hook exited with status 1
64 64 4:8ea2ef7ad3e8
65 65 preupdate hook: p1=b702efe96888 p2=
66 66 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
67 67 preupdate hook: p1=8ea2ef7ad3e8 p2=
68 68 update hook: p1=8ea2ef7ad3e8 p2= err=0
69 69 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
70 70 3:4c52fb2e4022
71 71 prechangegroup.forbid hook
72 72 pulling from ../a
73 73 searching for changes
74 74 abort: prechangegroup.forbid hook exited with status 1
75 75 pretxnchangegroup.forbid hook: tip=4:8ea2ef7ad3e8
76 76 pulling from ../a
77 77 searching for changes
78 78 adding changesets
79 79 adding manifests
80 80 adding file changes
81 81 added 1 changesets with 1 changes to 1 files
82 82 abort: pretxnchangegroup.forbid hook exited with status 1
83 83 transaction abort!
84 84 rollback completed
85 85 3:4c52fb2e4022
86 86 preoutgoing hook: s=pull
87 87 outgoing hook: n=8ea2ef7ad3e8cac946c72f1e0c79d6aebc301198 s=pull
88 88 pulling from ../a
89 89 searching for changes
90 90 adding changesets
91 91 adding manifests
92 92 adding file changes
93 93 added 1 changesets with 1 changes to 1 files
94 94 (run 'hg update' to get a working copy)
95 95 rolling back last transaction
96 96 preoutgoing hook: s=pull
97 97 preoutgoing.forbid hook
98 98 pulling from ../a
99 99 searching for changes
100 100 abort: preoutgoing.forbid hook exited with status 1
101 101 # test python hooks
102 102 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
103 103 error: preoutgoing.raise hook raised an exception: exception from hook
104 104 pulling from ../a
105 105 searching for changes
106 106 error: preoutgoing.abort hook failed: raise abort from hook
107 107 abort: raise abort from hook
108 108 pulling from ../a
109 109 searching for changes
110 110 hook args:
111 111 hooktype preoutgoing
112 112 source pull
113 113 abort: preoutgoing.fail hook failed
114 114 pulling from ../a
115 115 searching for changes
116 116 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
117 117 pulling from ../a
118 118 searching for changes
119 119 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
120 120 pulling from ../a
121 121 searching for changes
122 122 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
123 123 pulling from ../a
124 124 searching for changes
125 125 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
126 126 pulling from ../a
127 127 searching for changes
128 128 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
129 129 pulling from ../a
130 130 searching for changes
131 131 hook args:
132 132 hooktype preoutgoing
133 133 source pull
134 134 adding changesets
135 135 adding manifests
136 136 adding file changes
137 137 added 1 changesets with 1 changes to 1 files
138 138 (run 'hg update' to get a working copy)
139 139 # make sure --traceback works
140 140 Traceback (most recent call last):
141 Automatically installed hook
142 foo
143 calling hook commit.auto: <function autohook>
144 Automatically installed hook
General Comments 0
You need to be logged in to leave comments. Login now