Add branch support to commit
Matt Mackall
r3419:d0459ec1 default
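
This changeset teaches commit() to record a named branch: the pending branch
name is read from .hg/branch, compared against the first parent's branch (so
changing only the branch is enough to commit), and stored in the new
changeset's extra dict, where branchtags() picks it up. A minimal sketch of
the workflow this enables, assuming `repo` is an existing localrepository
instance; the branch name, user, and commit text are illustrative:

    # write the pending branch name; commit() reads it back via
    # self.opener("branch") and strips trailing whitespace
    f = repo.opener("branch", "w")
    f.write("stable\n")
    f.close()

    # with this change, a branch name differing from the first parent's
    # is enough to commit even when no files changed
    n = repo.commit(text="open stable branch", user="someone@example.com")

    # the new changeset carries {"branch": "stable"} in its extra dict,
    # and repo.branchtags()["stable"] now resolves to n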
@@ -1,1803 +1,1814
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ()
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 os.mkdir(self.join("data"))
41 41 else:
42 42 raise repo.RepoError(_("repository %s not found") % path)
43 43 elif create:
44 44 raise repo.RepoError(_("repository %s already exists") % path)
45 45
46 46 self.root = os.path.abspath(path)
47 47 self.origroot = path
48 48 self.ui = ui.ui(parentui=parentui)
49 49 self.opener = util.opener(self.path)
50 50 self.wopener = util.opener(self.root)
51 51
52 52 try:
53 53 self.ui.readconfig(self.join("hgrc"), self.root)
54 54 except IOError:
55 55 pass
56 56
57 57 v = self.ui.configrevlog()
58 58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 60 fl = v.get('flags', None)
61 61 flags = 0
62 62 if fl != None:
63 63 for x in fl.split():
64 64 flags |= revlog.flagstr(x)
65 65 elif self.revlogv1:
66 66 flags = revlog.REVLOG_DEFAULT_FLAGS
67 67
68 68 v = self.revlogversion | flags
69 69 self.manifest = manifest.manifest(self.opener, v)
70 70 self.changelog = changelog.changelog(self.opener, v)
71 71
72 72 # the changelog might not have the inline index flag
73 73 # on. If the format of the changelog is the same as found in
74 74 # .hgrc, apply any flags found in the .hgrc as well.
75 75 # Otherwise, just use the version from the changelog
76 76 v = self.changelog.version
77 77 if v == self.revlogversion:
78 78 v |= flags
79 79 self.revlogversion = v
80 80
81 81 self.tagscache = None
82 82 self.branchcache = None
83 83 self.nodetagscache = None
84 84 self.encodepats = None
85 85 self.decodepats = None
86 86 self.transhandle = None
87 87
88 88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 89
90 90 def url(self):
91 91 return 'file:' + self.root
92 92
93 93 def hook(self, name, throw=False, **args):
94 94 def callhook(hname, funcname):
95 95 '''call python hook. hook is callable object, looked up as
96 96 name in python module. if callable returns "true", hook
97 97 fails, else passes. if hook raises exception, treated as
98 98 hook failure. exception propagates if throw is "true".
99 99
100 100 reason for "true" meaning "hook failed" is so that
101 101 unmodified commands (e.g. mercurial.commands.update) can
102 102 be run as hooks without wrappers to convert return values.'''
103 103
104 104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 105 d = funcname.rfind('.')
106 106 if d == -1:
107 107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 108 % (hname, funcname))
109 109 modname = funcname[:d]
110 110 try:
111 111 obj = __import__(modname)
112 112 except ImportError:
113 113 try:
114 114 # extensions are loaded with hgext_ prefix
115 115 obj = __import__("hgext_%s" % modname)
116 116 except ImportError:
117 117 raise util.Abort(_('%s hook is invalid '
118 118 '(import of "%s" failed)') %
119 119 (hname, modname))
120 120 try:
121 121 for p in funcname.split('.')[1:]:
122 122 obj = getattr(obj, p)
123 123 except AttributeError, err:
124 124 raise util.Abort(_('%s hook is invalid '
125 125 '("%s" is not defined)') %
126 126 (hname, funcname))
127 127 if not callable(obj):
128 128 raise util.Abort(_('%s hook is invalid '
129 129 '("%s" is not callable)') %
130 130 (hname, funcname))
131 131 try:
132 132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 133 except (KeyboardInterrupt, util.SignalInterrupt):
134 134 raise
135 135 except Exception, exc:
136 136 if isinstance(exc, util.Abort):
137 137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 138 (hname, exc.args[0]))
139 139 else:
140 140 self.ui.warn(_('error: %s hook raised an exception: '
141 141 '%s\n') % (hname, exc))
142 142 if throw:
143 143 raise
144 144 self.ui.print_exc()
145 145 return True
146 146 if r:
147 147 if throw:
148 148 raise util.Abort(_('%s hook failed') % hname)
149 149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 150 return r
151 151
152 152 def runhook(name, cmd):
153 153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 155 r = util.system(cmd, environ=env, cwd=self.root)
156 156 if r:
157 157 desc, r = util.explain_exit(r)
158 158 if throw:
159 159 raise util.Abort(_('%s hook %s') % (name, desc))
160 160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 161 return r
162 162
163 163 r = False
164 164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 165 if hname.split(".", 1)[0] == name and cmd]
166 166 hooks.sort()
167 167 for hname, cmd in hooks:
168 168 if cmd.startswith('python:'):
169 169 r = callhook(hname, cmd[7:].strip()) or r
170 170 else:
171 171 r = runhook(hname, cmd) or r
172 172 return r
173 173
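
Entries in the [hooks] section whose value starts with "python:" are
dispatched through callhook() above (the module is loaded with __import__,
and a true return value means failure); anything else is run by runhook()
through util.system() with the hook arguments exported as HG_* environment
variables. A sketch of a Python hook compatible with callhook(); the module
name myhooks and the hgrc wiring are assumptions:

    # hgrc:  [hooks]
    #        pretxncommit.check = python:myhooks.check
    def check(ui, repo, hooktype, **kwargs):
        # callhook() invokes this as obj(ui=..., repo=..., hooktype=...,
        # **args); for pretxncommit, kwargs include node/parent1/parent2
        ui.note("check hook fired for %s\n" % hooktype)
        # returning a true value marks the hook as failed; with
        # throw=True the caller raises util.Abort
        return False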
174 174 tag_disallowed = ':\r\n'
175 175
176 176 def tag(self, name, node, message, local, user, date):
177 177 '''tag a revision with a symbolic name.
178 178
179 179 if local is True, the tag is stored in a per-repository file.
180 180 otherwise, it is stored in the .hgtags file, and a new
181 181 changeset is committed with the change.
182 182
183 183 keyword arguments:
184 184
185 185 local: whether to store tag in non-version-controlled file
186 186 (default False)
187 187
188 188 message: commit message to use if committing
189 189
190 190 user: name of user to use if committing
191 191
192 192 date: date tuple to use if committing'''
193 193
194 194 for c in self.tag_disallowed:
195 195 if c in name:
196 196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 197
198 198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 199
200 200 if local:
201 201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 202 self.hook('tag', node=hex(node), tag=name, local=local)
203 203 return
204 204
205 205 for x in self.status()[:5]:
206 206 if '.hgtags' in x:
207 207 raise util.Abort(_('working copy of .hgtags is changed '
208 208 '(please commit .hgtags manually)'))
209 209
210 210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 211 if self.dirstate.state('.hgtags') == '?':
212 212 self.add(['.hgtags'])
213 213
214 214 self.commit(['.hgtags'], message, user, date)
215 215 self.hook('tag', node=hex(node), tag=name, local=local)
216 216
217 217 def tags(self):
218 218 '''return a mapping of tag to node'''
219 219 if not self.tagscache:
220 220 self.tagscache = {}
221 221
222 222 def parsetag(line, context):
223 223 if not line:
224 224 return
225 225 s = line.split(" ", 1)
226 226 if len(s) != 2:
227 227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 228 return
229 229 node, key = s
230 230 key = key.strip()
231 231 try:
232 232 bin_n = bin(node)
233 233 except TypeError:
234 234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 235 (context, node))
236 236 return
237 237 if bin_n not in self.changelog.nodemap:
238 238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 239 (context, key))
240 240 return
241 241 self.tagscache[key] = bin_n
242 242
243 243 # read the tags file from each head, ending with the tip,
244 244 # and add each tag found to the map, with "newer" ones
245 245 # taking precedence
246 246 heads = self.heads()
247 247 heads.reverse()
248 248 fl = self.file(".hgtags")
249 249 for node in heads:
250 250 change = self.changelog.read(node)
251 251 rev = self.changelog.rev(node)
252 252 fn, ff = self.manifest.find(change[0], '.hgtags')
253 253 if fn is None: continue
254 254 count = 0
255 255 for l in fl.read(fn).splitlines():
256 256 count += 1
257 257 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
258 258 (rev, short(node), count))
259 259 try:
260 260 f = self.opener("localtags")
261 261 count = 0
262 262 for l in f:
263 263 count += 1
264 264 parsetag(l, _("localtags, line %d") % count)
265 265 except IOError:
266 266 pass
267 267
268 268 self.tagscache['tip'] = self.changelog.tip()
269 269
270 270 return self.tagscache
271 271
272 272 def tagslist(self):
273 273 '''return a list of tags ordered by revision'''
274 274 l = []
275 275 for t, n in self.tags().items():
276 276 try:
277 277 r = self.changelog.rev(n)
278 278 except:
279 279 r = -2 # sort to the beginning of the list if unknown
280 280 l.append((r, t, n))
281 281 l.sort()
282 282 return [(t, n) for r, t, n in l]
283 283
284 284 def nodetags(self, node):
285 285 '''return the tags associated with a node'''
286 286 if not self.nodetagscache:
287 287 self.nodetagscache = {}
288 288 for t, n in self.tags().items():
289 289 self.nodetagscache.setdefault(n, []).append(t)
290 290 return self.nodetagscache.get(node, [])
291 291
292 292 def branchtags(self):
293 293 if self.branchcache != None:
294 294 return self.branchcache
295 295
296 296 self.branchcache = {}
297 297
298 298 try:
299 299 f = self.opener("branches.cache")
300 300 last, lrev = f.readline().rstrip().split(" ", 1)
301 301 last, lrev = bin(last), int(lrev)
302 302 if self.changelog.node(lrev) == last: # sanity check
303 303 for l in f:
304 304 node, label = l.rstrip().split(" ", 1)
305 305 self.branchcache[label] = bin(node)
306 306 f.close()
307 307 except IOError:
308 308 last, lrev = nullid, -1
309 309 lrev = self.changelog.rev(last)
310 310
311 311 tip = self.changelog.count() - 1
312 312 if lrev != tip:
313 313 for r in range(lrev + 1, tip + 1):
314 314 n = self.changelog.node(r)
315 315 c = self.changelog.read(n)
316 316 b = c[5].get("branch")
317 317 if b:
318 318 self.branchcache[b] = n
319 319 self._writebranchcache()
320 320
321 321 return self.branchcache
322 322
323 323 def _writebranchcache(self):
324 324 f = self.opener("branches.cache", "w")
325 325 t = self.changelog.tip()
326 326 f.write("%s %s\n" % (hex(t), self.changelog.count() - 1))
327 327 for label, node in self.branchcache.iteritems():
328 328 f.write("%s %s\n" % (hex(node), label))
329 329
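
branchtags() and _writebranchcache() above define the on-disk layout of
.hg/branches.cache: the first line holds the tip node and tip rev (verified
on read via self.changelog.node(lrev) == last), and each following line maps
a branch head node to its label. A sketch of the file, with node hashes left
as placeholders:

    <40-hex tip node> 3419
    <40-hex head node> default
    <40-hex head node> stable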
330 330 def lookup(self, key):
331 331 if key == '.':
332 332 key = self.dirstate.parents()[0]
333 333 if key == nullid:
334 334 raise repo.RepoError(_("no revision checked out"))
335 335 if key in self.tags():
336 336 return self.tags()[key]
337 337 if key in self.branchtags():
338 338 return self.branchtags()[key]
339 339 try:
340 340 return self.changelog.lookup(key)
341 341 except:
342 342 raise repo.RepoError(_("unknown revision '%s'") % key)
343 343
344 344 def dev(self):
345 345 return os.lstat(self.path).st_dev
346 346
347 347 def local(self):
348 348 return True
349 349
350 350 def join(self, f):
351 351 return os.path.join(self.path, f)
352 352
353 353 def wjoin(self, f):
354 354 return os.path.join(self.root, f)
355 355
356 356 def file(self, f):
357 357 if f[0] == '/':
358 358 f = f[1:]
359 359 return filelog.filelog(self.opener, f, self.revlogversion)
360 360
361 361 def changectx(self, changeid=None):
362 362 return context.changectx(self, changeid)
363 363
364 364 def workingctx(self):
365 365 return context.workingctx(self)
366 366
367 367 def parents(self, changeid=None):
368 368 '''
369 369 get list of changectxs for parents of changeid or working directory
370 370 '''
371 371 if changeid is None:
372 372 pl = self.dirstate.parents()
373 373 else:
374 374 n = self.changelog.lookup(changeid)
375 375 pl = self.changelog.parents(n)
376 376 if pl[1] == nullid:
377 377 return [self.changectx(pl[0])]
378 378 return [self.changectx(pl[0]), self.changectx(pl[1])]
379 379
380 380 def filectx(self, path, changeid=None, fileid=None):
381 381 """changeid can be a changeset revision, node, or tag.
382 382 fileid can be a file revision or node."""
383 383 return context.filectx(self, path, changeid, fileid)
384 384
385 385 def getcwd(self):
386 386 return self.dirstate.getcwd()
387 387
388 388 def wfile(self, f, mode='r'):
389 389 return self.wopener(f, mode)
390 390
391 391 def wread(self, filename):
392 392 if self.encodepats == None:
393 393 l = []
394 394 for pat, cmd in self.ui.configitems("encode"):
395 395 mf = util.matcher(self.root, "", [pat], [], [])[1]
396 396 l.append((mf, cmd))
397 397 self.encodepats = l
398 398
399 399 data = self.wopener(filename, 'r').read()
400 400
401 401 for mf, cmd in self.encodepats:
402 402 if mf(filename):
403 403 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
404 404 data = util.filter(data, cmd)
405 405 break
406 406
407 407 return data
408 408
409 409 def wwrite(self, filename, data, fd=None):
410 410 if self.decodepats == None:
411 411 l = []
412 412 for pat, cmd in self.ui.configitems("decode"):
413 413 mf = util.matcher(self.root, "", [pat], [], [])[1]
414 414 l.append((mf, cmd))
415 415 self.decodepats = l
416 416
417 417 for mf, cmd in self.decodepats:
418 418 if mf(filename):
419 419 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
420 420 data = util.filter(data, cmd)
421 421 break
422 422
423 423 if fd:
424 424 return fd.write(data)
425 425 return self.wopener(filename, 'w').write(data)
426 426
427 427 def transaction(self):
428 428 tr = self.transhandle
429 429 if tr != None and tr.running():
430 430 return tr.nest()
431 431
432 432 # save dirstate for rollback
433 433 try:
434 434 ds = self.opener("dirstate").read()
435 435 except IOError:
436 436 ds = ""
437 437 self.opener("journal.dirstate", "w").write(ds)
438 438
439 439 tr = transaction.transaction(self.ui.warn, self.opener,
440 440 self.join("journal"),
441 441 aftertrans(self.path))
442 442 self.transhandle = tr
443 443 return tr
444 444
445 445 def recover(self):
446 446 l = self.lock()
447 447 if os.path.exists(self.join("journal")):
448 448 self.ui.status(_("rolling back interrupted transaction\n"))
449 449 transaction.rollback(self.opener, self.join("journal"))
450 450 self.reload()
451 451 return True
452 452 else:
453 453 self.ui.warn(_("no interrupted transaction available\n"))
454 454 return False
455 455
456 456 def rollback(self, wlock=None):
457 457 if not wlock:
458 458 wlock = self.wlock()
459 459 l = self.lock()
460 460 if os.path.exists(self.join("undo")):
461 461 self.ui.status(_("rolling back last transaction\n"))
462 462 transaction.rollback(self.opener, self.join("undo"))
463 463 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
464 464 self.reload()
465 465 self.wreload()
466 466 else:
467 467 self.ui.warn(_("no rollback information available\n"))
468 468
469 469 def wreload(self):
470 470 self.dirstate.read()
471 471
472 472 def reload(self):
473 473 self.changelog.load()
474 474 self.manifest.load()
475 475 self.tagscache = None
476 476 self.nodetagscache = None
477 477
478 478 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
479 479 desc=None):
480 480 try:
481 481 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
482 482 except lock.LockHeld, inst:
483 483 if not wait:
484 484 raise
485 485 self.ui.warn(_("waiting for lock on %s held by %s\n") %
486 486 (desc, inst.args[0]))
487 487 # default to 600 seconds timeout
488 488 l = lock.lock(self.join(lockname),
489 489 int(self.ui.config("ui", "timeout") or 600),
490 490 releasefn, desc=desc)
491 491 if acquirefn:
492 492 acquirefn()
493 493 return l
494 494
495 495 def lock(self, wait=1):
496 496 return self.do_lock("lock", wait, acquirefn=self.reload,
497 497 desc=_('repository %s') % self.origroot)
498 498
499 499 def wlock(self, wait=1):
500 500 return self.do_lock("wlock", wait, self.dirstate.write,
501 501 self.wreload,
502 502 desc=_('working directory of %s') % self.origroot)
503 503
504 504 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
505 505 """
506 506 commit an individual file as part of a larger transaction
507 507 """
508 508
509 509 t = self.wread(fn)
510 510 fl = self.file(fn)
511 511 fp1 = manifest1.get(fn, nullid)
512 512 fp2 = manifest2.get(fn, nullid)
513 513
514 514 meta = {}
515 515 cp = self.dirstate.copied(fn)
516 516 if cp:
517 517 meta["copy"] = cp
518 518 if not manifest2: # not a branch merge
519 519 meta["copyrev"] = hex(manifest1.get(cp, nullid))
520 520 fp2 = nullid
521 521 elif fp2 != nullid: # copied on remote side
522 522 meta["copyrev"] = hex(manifest1.get(cp, nullid))
523 523 else: # copied on local side, reversed
524 524 meta["copyrev"] = hex(manifest2.get(cp))
525 525 fp2 = nullid
526 526 self.ui.debug(_(" %s: copy %s:%s\n") %
527 527 (fn, cp, meta["copyrev"]))
528 528 fp1 = nullid
529 529 elif fp2 != nullid:
530 530 # is one parent an ancestor of the other?
531 531 fpa = fl.ancestor(fp1, fp2)
532 532 if fpa == fp1:
533 533 fp1, fp2 = fp2, nullid
534 534 elif fpa == fp2:
535 535 fp2 = nullid
536 536
537 537 # is the file unmodified from the parent? report existing entry
538 538 if fp2 == nullid and not fl.cmp(fp1, t):
539 539 return fp1
540 540
541 541 changelist.append(fn)
542 542 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
543 543
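
For a plain (non-merge) copy recorded by the dirstate, the metadata that
filecommit() attaches to the new filelog entry looks roughly like the sketch
below; the path is illustrative, and hex/nullid/manifest1 are the method's
own names:

    meta = {
        "copy": "src/old.py",       # source path from self.dirstate.copied(fn)
        "copyrev": hex(manifest1.get("src/old.py", nullid)),
                                    # filenode of the source in parent 1
    }
    fp2 = nullid   # the copy is recorded against a single parent

If the file is unmodified relative to the chosen parent, the method
short-circuits and returns the existing filenode instead of adding a new one.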
544 544 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
545 545 orig_parent = self.dirstate.parents()[0] or nullid
546 546 p1 = p1 or self.dirstate.parents()[0] or nullid
547 547 p2 = p2 or self.dirstate.parents()[1] or nullid
548 548 c1 = self.changelog.read(p1)
549 549 c2 = self.changelog.read(p2)
550 550 m1 = self.manifest.read(c1[0]).copy()
551 551 m2 = self.manifest.read(c2[0])
552 552 changed = []
553 553 removed = []
554 554
555 555 if orig_parent == p1:
556 556 update_dirstate = 1
557 557 else:
558 558 update_dirstate = 0
559 559
560 560 if not wlock:
561 561 wlock = self.wlock()
562 562 l = self.lock()
563 563 tr = self.transaction()
564 564 linkrev = self.changelog.count()
565 565 for f in files:
566 566 try:
567 567 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
568 568 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
569 569 except IOError:
570 570 try:
571 571 del m1[f]
572 572 if update_dirstate:
573 573 self.dirstate.forget([f])
574 574 removed.append(f)
575 575 except:
576 576 # deleted from p2?
577 577 pass
578 578
579 579 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
580 580 user = user or self.ui.username()
581 581 n = self.changelog.add(mnode, changed + removed, text,
582 582 tr, p1, p2, user, date)
583 583 tr.close()
584 584 if update_dirstate:
585 585 self.dirstate.setparents(n, nullid)
586 586
587 587 def commit(self, files=None, text="", user=None, date=None,
588 588 match=util.always, force=False, lock=None, wlock=None,
589 589 force_editor=False):
590 590 commit = []
591 591 remove = []
592 592 changed = []
593 593
594 594 if files:
595 595 for f in files:
596 596 s = self.dirstate.state(f)
597 597 if s in 'nmai':
598 598 commit.append(f)
599 599 elif s == 'r':
600 600 remove.append(f)
601 601 else:
602 602 self.ui.warn(_("%s not tracked!\n") % f)
603 603 else:
604 604 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
605 605 commit = modified + added
606 606 remove = removed
607 607
608 608 p1, p2 = self.dirstate.parents()
609 609 c1 = self.changelog.read(p1)
610 610 c2 = self.changelog.read(p2)
611 611 m1 = self.manifest.read(c1[0]).copy()
612 612 m2 = self.manifest.read(c2[0])
613 613
614 if not commit and not remove and not force and p2 == nullid:
614 try:
615 branchname = self.opener("branch").read().rstrip()
616 except IOError:
617 branchname = ""
618 oldname = c1[5].get("branch", "")
619
620 if not commit and not remove and not force and p2 == nullid and \
621 branchname == oldname:
615 622 self.ui.status(_("nothing changed\n"))
616 623 return None
617 624
618 625 xp1 = hex(p1)
619 626 if p2 == nullid: xp2 = ''
620 627 else: xp2 = hex(p2)
621 628
622 629 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
623 630
624 631 if not wlock:
625 632 wlock = self.wlock()
626 633 if not lock:
627 634 lock = self.lock()
628 635 tr = self.transaction()
629 636
630 637 # check in files
631 638 new = {}
632 639 linkrev = self.changelog.count()
633 640 commit.sort()
634 641 for f in commit:
635 642 self.ui.note(f + "\n")
636 643 try:
637 644 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
638 645 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
639 646 except IOError:
640 647 self.ui.warn(_("trouble committing %s!\n") % f)
641 648 raise
642 649
643 650 # update manifest
644 651 m1.update(new)
645 652 for f in remove:
646 653 if f in m1:
647 654 del m1[f]
648 655 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
649 656
650 657 # add changeset
651 658 new = new.keys()
652 659 new.sort()
653 660
654 661 user = user or self.ui.username()
655 662 if not text or force_editor:
656 663 edittext = []
657 664 if text:
658 665 edittext.append(text)
659 666 edittext.append("")
660 667 if p2 != nullid:
661 668 edittext.append("HG: branch merge")
662 669 edittext.extend(["HG: changed %s" % f for f in changed])
663 670 edittext.extend(["HG: removed %s" % f for f in remove])
664 671 if not changed and not remove:
665 672 edittext.append("HG: no files changed")
666 673 edittext.append("")
667 674 # run editor in the repository root
668 675 olddir = os.getcwd()
669 676 os.chdir(self.root)
670 677 text = self.ui.edit("\n".join(edittext), user)
671 678 os.chdir(olddir)
672 679
673 680 lines = [line.rstrip() for line in text.rstrip().splitlines()]
674 681 while lines and not lines[0]:
675 682 del lines[0]
676 683 if not lines:
677 684 return None
678 685 text = '\n'.join(lines)
679 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
686 extra = {}
687 if branchname:
688 extra["branch"] = branchname
689 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
690 user, date, extra)
680 691 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
681 692 parent2=xp2)
682 693 tr.close()
683 694
684 695 self.dirstate.setparents(n)
685 696 self.dirstate.update(new, "n")
686 697 self.dirstate.forget(remove)
687 698
688 699 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
689 700 return n
690 701
691 702 def walk(self, node=None, files=[], match=util.always, badmatch=None):
692 703 if node:
693 704 fdict = dict.fromkeys(files)
694 705 for fn in self.manifest.read(self.changelog.read(node)[0]):
695 706 for ffn in fdict:
696 707 # match if the file is the exact name or a directory
697 708 if ffn == fn or fn.startswith("%s/" % ffn):
698 709 del fdict[ffn]
699 710 break
700 711 if match(fn):
701 712 yield 'm', fn
702 713 for fn in fdict:
703 714 if badmatch and badmatch(fn):
704 715 if match(fn):
705 716 yield 'b', fn
706 717 else:
707 718 self.ui.warn(_('%s: No such file in rev %s\n') % (
708 719 util.pathto(self.getcwd(), fn), short(node)))
709 720 else:
710 721 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
711 722 yield src, fn
712 723
713 724 def status(self, node1=None, node2=None, files=[], match=util.always,
714 725 wlock=None, list_ignored=False, list_clean=False):
715 726 """return status of files between two nodes or node and working directory
716 727
717 728 If node1 is None, use the first dirstate parent instead.
718 729 If node2 is None, compare node1 with working directory.
719 730 """
720 731
721 732 def fcmp(fn, mf):
722 733 t1 = self.wread(fn)
723 734 return self.file(fn).cmp(mf.get(fn, nullid), t1)
724 735
725 736 def mfmatches(node):
726 737 change = self.changelog.read(node)
727 738 mf = self.manifest.read(change[0]).copy()
728 739 for fn in mf.keys():
729 740 if not match(fn):
730 741 del mf[fn]
731 742 return mf
732 743
733 744 modified, added, removed, deleted, unknown = [], [], [], [], []
734 745 ignored, clean = [], []
735 746
736 747 compareworking = False
737 748 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
738 749 compareworking = True
739 750
740 751 if not compareworking:
741 752 # read the manifest from node1 before the manifest from node2,
742 753 # so that we'll hit the manifest cache if we're going through
743 754 # all the revisions in parent->child order.
744 755 mf1 = mfmatches(node1)
745 756
746 757 # are we comparing the working directory?
747 758 if not node2:
748 759 if not wlock:
749 760 try:
750 761 wlock = self.wlock(wait=0)
751 762 except lock.LockException:
752 763 wlock = None
753 764 (lookup, modified, added, removed, deleted, unknown,
754 765 ignored, clean) = self.dirstate.status(files, match,
755 766 list_ignored, list_clean)
756 767
757 768 # are we comparing working dir against its parent?
758 769 if compareworking:
759 770 if lookup:
760 771 # do a full compare of any files that might have changed
761 772 mf2 = mfmatches(self.dirstate.parents()[0])
762 773 for f in lookup:
763 774 if fcmp(f, mf2):
764 775 modified.append(f)
765 776 else:
766 777 clean.append(f)
767 778 if wlock is not None:
768 779 self.dirstate.update([f], "n")
769 780 else:
770 781 # we are comparing working dir against non-parent
771 782 # generate a pseudo-manifest for the working dir
772 783 # XXX: create it in dirstate.py ?
773 784 mf2 = mfmatches(self.dirstate.parents()[0])
774 785 for f in lookup + modified + added:
775 786 mf2[f] = ""
776 787 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
777 788 for f in removed:
778 789 if f in mf2:
779 790 del mf2[f]
780 791 else:
781 792 # we are comparing two revisions
782 793 mf2 = mfmatches(node2)
783 794
784 795 if not compareworking:
785 796 # flush lists from dirstate before comparing manifests
786 797 modified, added, clean = [], [], []
787 798
788 799 # make sure to sort the files so we talk to the disk in a
789 800 # reasonable order
790 801 mf2keys = mf2.keys()
791 802 mf2keys.sort()
792 803 for fn in mf2keys:
793 804 if mf1.has_key(fn):
794 805 if mf1.flags(fn) != mf2.flags(fn) or \
795 806 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
796 807 modified.append(fn)
797 808 elif list_clean:
798 809 clean.append(fn)
799 810 del mf1[fn]
800 811 else:
801 812 added.append(fn)
802 813
803 814 removed = mf1.keys()
804 815
805 816 # sort and return results:
806 817 for l in modified, added, removed, deleted, unknown, ignored, clean:
807 818 l.sort()
808 819 return (modified, added, removed, deleted, unknown, ignored, clean)
809 820
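
status() always returns the seven lists in this fixed order; ignored and
clean stay empty unless explicitly requested. A sketch of a caller, assuming
an existing repo (Python 2, like the rest of this module):

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(list_ignored=True, list_clean=True)
    for f in modified:
        print "M", f
    for f in unknown:
        print "?", f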
810 821 def add(self, list, wlock=None):
811 822 if not wlock:
812 823 wlock = self.wlock()
813 824 for f in list:
814 825 p = self.wjoin(f)
815 826 if not os.path.exists(p):
816 827 self.ui.warn(_("%s does not exist!\n") % f)
817 828 elif not os.path.isfile(p):
818 829 self.ui.warn(_("%s not added: only files supported currently\n")
819 830 % f)
820 831 elif self.dirstate.state(f) in 'an':
821 832 self.ui.warn(_("%s already tracked!\n") % f)
822 833 else:
823 834 self.dirstate.update([f], "a")
824 835
825 836 def forget(self, list, wlock=None):
826 837 if not wlock:
827 838 wlock = self.wlock()
828 839 for f in list:
829 840 if self.dirstate.state(f) not in 'ai':
830 841 self.ui.warn(_("%s not added!\n") % f)
831 842 else:
832 843 self.dirstate.forget([f])
833 844
834 845 def remove(self, list, unlink=False, wlock=None):
835 846 if unlink:
836 847 for f in list:
837 848 try:
838 849 util.unlink(self.wjoin(f))
839 850 except OSError, inst:
840 851 if inst.errno != errno.ENOENT:
841 852 raise
842 853 if not wlock:
843 854 wlock = self.wlock()
844 855 for f in list:
845 856 p = self.wjoin(f)
846 857 if os.path.exists(p):
847 858 self.ui.warn(_("%s still exists!\n") % f)
848 859 elif self.dirstate.state(f) == 'a':
849 860 self.dirstate.forget([f])
850 861 elif f not in self.dirstate:
851 862 self.ui.warn(_("%s not tracked!\n") % f)
852 863 else:
853 864 self.dirstate.update([f], "r")
854 865
855 866 def undelete(self, list, wlock=None):
856 867 p = self.dirstate.parents()[0]
857 868 mn = self.changelog.read(p)[0]
858 869 m = self.manifest.read(mn)
859 870 if not wlock:
860 871 wlock = self.wlock()
861 872 for f in list:
862 873 if self.dirstate.state(f) not in "r":
864 875 self.ui.warn(_("%s not removed!\n") % f)
864 875 else:
865 876 t = self.file(f).read(m[f])
866 877 self.wwrite(f, t)
867 878 util.set_exec(self.wjoin(f), m.execf(f))
868 879 self.dirstate.update([f], "n")
869 880
870 881 def copy(self, source, dest, wlock=None):
871 882 p = self.wjoin(dest)
872 883 if not os.path.exists(p):
873 884 self.ui.warn(_("%s does not exist!\n") % dest)
874 885 elif not os.path.isfile(p):
875 886 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
876 887 else:
877 888 if not wlock:
878 889 wlock = self.wlock()
879 890 if self.dirstate.state(dest) == '?':
880 891 self.dirstate.update([dest], "a")
881 892 self.dirstate.copy(source, dest)
882 893
883 894 def heads(self, start=None):
884 895 heads = self.changelog.heads(start)
885 896 # sort the output in rev descending order
886 897 heads = [(-self.changelog.rev(h), h) for h in heads]
887 898 heads.sort()
888 899 return [n for (r, n) in heads]
889 900
890 901 # branchlookup returns a dict giving a list of branches for
891 902 # each head. A branch is defined as the tag of a node or
892 903 # the branch of the node's parents. If a node has multiple
893 904 # branch tags, tags are eliminated if they are visible from other
894 905 # branch tags.
895 906 #
896 907 # So, for this graph: a->b->c->d->e
897 908 #                      \         /
898 909 #                       aa -----/
899 910 # a has tag 2.6.12
900 911 # d has tag 2.6.13
901 912 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
902 913 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
903 914 # from the list.
904 915 #
905 916 # It is possible that more than one head will have the same branch tag.
906 917 # callers need to check the result for multiple heads under the same
907 918 # branch tag if that is a problem for them (ie checkout of a specific
908 919 # branch).
909 920 #
910 921 # passing in a specific branch will limit the depth of the search
911 922 # through the parents. It won't limit the branches returned in the
912 923 # result though.
913 924 def branchlookup(self, heads=None, branch=None):
914 925 if not heads:
915 926 heads = self.heads()
917 928 headt = [h for h in heads]
917 928 chlog = self.changelog
918 929 branches = {}
919 930 merges = []
920 931 seenmerge = {}
921 932
922 933 # traverse the tree once for each head, recording in the branches
923 934 # dict which tags are visible from this head. The branches
924 935 # dict also records which tags are visible from each tag
925 936 # while we traverse.
926 937 while headt or merges:
927 938 if merges:
928 939 n, found = merges.pop()
929 940 visit = [n]
930 941 else:
931 942 h = headt.pop()
932 943 visit = [h]
933 944 found = [h]
934 945 seen = {}
935 946 while visit:
936 947 n = visit.pop()
937 948 if n in seen:
938 949 continue
939 950 pp = chlog.parents(n)
940 951 tags = self.nodetags(n)
941 952 if tags:
942 953 for x in tags:
943 954 if x == 'tip':
944 955 continue
945 956 for f in found:
946 957 branches.setdefault(f, {})[n] = 1
947 958 branches.setdefault(n, {})[n] = 1
948 959 break
949 960 if n not in found:
950 961 found.append(n)
951 962 if branch in tags:
952 963 continue
953 964 seen[n] = 1
954 965 if pp[1] != nullid and n not in seenmerge:
955 966 merges.append((pp[1], [x for x in found]))
956 967 seenmerge[n] = 1
957 968 if pp[0] != nullid:
958 969 visit.append(pp[0])
959 970 # traverse the branches dict, eliminating branch tags from each
960 971 # head that are visible from another branch tag for that head.
961 972 out = {}
962 973 viscache = {}
963 974 for h in heads:
964 975 def visible(node):
965 976 if node in viscache:
966 977 return viscache[node]
967 978 ret = {}
968 979 visit = [node]
969 980 while visit:
970 981 x = visit.pop()
971 982 if x in viscache:
972 983 ret.update(viscache[x])
973 984 elif x not in ret:
974 985 ret[x] = 1
975 986 if x in branches:
976 987 visit[len(visit):] = branches[x].keys()
977 988 viscache[node] = ret
978 989 return ret
979 990 if h not in branches:
980 991 continue
981 992 # O(n^2), but somewhat limited. This only searches the
982 993 # tags visible from a specific head, not all the tags in the
983 994 # whole repo.
984 995 for b in branches[h]:
985 996 vis = False
986 997 for bb in branches[h].keys():
987 998 if b != bb:
988 999 if b in visible(bb):
989 1000 vis = True
990 1001 break
991 1002 if not vis:
992 1003 l = out.setdefault(h, [])
993 1004 l[len(l):] = self.nodetags(b)
994 1005 return out
995 1006
996 1007 def branches(self, nodes):
997 1008 if not nodes:
998 1009 nodes = [self.changelog.tip()]
999 1010 b = []
1000 1011 for n in nodes:
1001 1012 t = n
1002 1013 while 1:
1003 1014 p = self.changelog.parents(n)
1004 1015 if p[1] != nullid or p[0] == nullid:
1005 1016 b.append((t, n, p[0], p[1]))
1006 1017 break
1007 1018 n = p[0]
1008 1019 return b
1009 1020
1010 1021 def between(self, pairs):
1011 1022 r = []
1012 1023
1013 1024 for top, bottom in pairs:
1014 1025 n, l, i = top, [], 0
1015 1026 f = 1
1016 1027
1017 1028 while n != bottom:
1018 1029 p = self.changelog.parents(n)[0]
1019 1030 if i == f:
1020 1031 l.append(n)
1021 1032 f = f * 2
1022 1033 n = p
1023 1034 i += 1
1024 1035
1025 1036 r.append(l)
1026 1037
1027 1038 return r
1028 1039
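
between() walks first parents from each top toward its bottom, keeping nodes
at exponentially growing distances (1, 2, 4, 8, ...); findincoming() later
feeds these samples to its binary search over unknown branch ranges. A toy
trace of which step counts the loop above records, assuming a 20-step chain:

    i, f, kept = 0, 1, []
    while i < 20:            # stand-in for "while n != bottom"
        if i == f:
            kept.append(i)   # l.append(n) in the real loop
            f = f * 2
        i += 1
    # kept == [1, 2, 4, 8, 16]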
1029 1040 def findincoming(self, remote, base=None, heads=None, force=False):
1030 1041 """Return list of roots of the subsets of missing nodes from remote
1031 1042
1032 1043 If base dict is specified, assume that these nodes and their parents
1033 1044 exist on the remote side and that no child of a node of base exists
1034 1045 in both remote and self.
1035 1046 Furthermore, base will be updated to include the nodes that exist
1036 1047 in both self and remote but whose children do not exist in both.
1037 1048 If a list of heads is specified, return only nodes which are heads
1038 1049 or ancestors of these heads.
1039 1050
1040 1051 All the ancestors of base are in self and in remote.
1041 1052 All the descendants of the list returned are missing in self.
1042 1053 (and so we know that the rest of the nodes are missing in remote, see
1043 1054 outgoing)
1044 1055 """
1045 1056 m = self.changelog.nodemap
1046 1057 search = []
1047 1058 fetch = {}
1048 1059 seen = {}
1049 1060 seenbranch = {}
1050 1061 if base == None:
1051 1062 base = {}
1052 1063
1053 1064 if not heads:
1054 1065 heads = remote.heads()
1055 1066
1056 1067 if self.changelog.tip() == nullid:
1057 1068 base[nullid] = 1
1058 1069 if heads != [nullid]:
1059 1070 return [nullid]
1060 1071 return []
1061 1072
1062 1073 # assume we're closer to the tip than the root
1063 1074 # and start by examining the heads
1064 1075 self.ui.status(_("searching for changes\n"))
1065 1076
1066 1077 unknown = []
1067 1078 for h in heads:
1068 1079 if h not in m:
1069 1080 unknown.append(h)
1070 1081 else:
1071 1082 base[h] = 1
1072 1083
1073 1084 if not unknown:
1074 1085 return []
1075 1086
1076 1087 req = dict.fromkeys(unknown)
1077 1088 reqcnt = 0
1078 1089
1079 1090 # search through remote branches
1080 1091 # a 'branch' here is a linear segment of history, with four parts:
1081 1092 # head, root, first parent, second parent
1082 1093 # (a branch always has two parents (or none) by definition)
1083 1094 unknown = remote.branches(unknown)
1084 1095 while unknown:
1085 1096 r = []
1086 1097 while unknown:
1087 1098 n = unknown.pop(0)
1088 1099 if n[0] in seen:
1089 1100 continue
1090 1101
1091 1102 self.ui.debug(_("examining %s:%s\n")
1092 1103 % (short(n[0]), short(n[1])))
1093 1104 if n[0] == nullid: # found the end of the branch
1094 1105 pass
1095 1106 elif n in seenbranch:
1096 1107 self.ui.debug(_("branch already found\n"))
1097 1108 continue
1098 1109 elif n[1] and n[1] in m: # do we know the base?
1099 1110 self.ui.debug(_("found incomplete branch %s:%s\n")
1100 1111 % (short(n[0]), short(n[1])))
1101 1112 search.append(n) # schedule branch range for scanning
1102 1113 seenbranch[n] = 1
1103 1114 else:
1104 1115 if n[1] not in seen and n[1] not in fetch:
1105 1116 if n[2] in m and n[3] in m:
1106 1117 self.ui.debug(_("found new changeset %s\n") %
1107 1118 short(n[1]))
1108 1119 fetch[n[1]] = 1 # earliest unknown
1109 1120 for p in n[2:4]:
1110 1121 if p in m:
1111 1122 base[p] = 1 # latest known
1112 1123
1113 1124 for p in n[2:4]:
1114 1125 if p not in req and p not in m:
1115 1126 r.append(p)
1116 1127 req[p] = 1
1117 1128 seen[n[0]] = 1
1118 1129
1119 1130 if r:
1120 1131 reqcnt += 1
1121 1132 self.ui.debug(_("request %d: %s\n") %
1122 1133 (reqcnt, " ".join(map(short, r))))
1123 1134 for p in range(0, len(r), 10):
1124 1135 for b in remote.branches(r[p:p+10]):
1125 1136 self.ui.debug(_("received %s:%s\n") %
1126 1137 (short(b[0]), short(b[1])))
1127 1138 unknown.append(b)
1128 1139
1129 1140 # do binary search on the branches we found
1130 1141 while search:
1131 1142 n = search.pop(0)
1132 1143 reqcnt += 1
1133 1144 l = remote.between([(n[0], n[1])])[0]
1134 1145 l.append(n[1])
1135 1146 p = n[0]
1136 1147 f = 1
1137 1148 for i in l:
1138 1149 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1139 1150 if i in m:
1140 1151 if f <= 2:
1141 1152 self.ui.debug(_("found new branch changeset %s\n") %
1142 1153 short(p))
1143 1154 fetch[p] = 1
1144 1155 base[i] = 1
1145 1156 else:
1146 1157 self.ui.debug(_("narrowed branch search to %s:%s\n")
1147 1158 % (short(p), short(i)))
1148 1159 search.append((p, i))
1149 1160 break
1150 1161 p, f = i, f * 2
1151 1162
1152 1163 # sanity check our fetch list
1153 1164 for f in fetch.keys():
1154 1165 if f in m:
1155 1166 raise repo.RepoError(_("already have changeset ") + short(f))
1156 1167
1157 1168 if base.keys() == [nullid]:
1158 1169 if force:
1159 1170 self.ui.warn(_("warning: repository is unrelated\n"))
1160 1171 else:
1161 1172 raise util.Abort(_("repository is unrelated"))
1162 1173
1163 1174 self.ui.debug(_("found new changesets starting at ") +
1164 1175 " ".join([short(f) for f in fetch]) + "\n")
1165 1176
1166 1177 self.ui.debug(_("%d total queries\n") % reqcnt)
1167 1178
1168 1179 return fetch.keys()
1169 1180
1170 1181 def findoutgoing(self, remote, base=None, heads=None, force=False):
1171 1182 """Return list of nodes that are roots of subsets not in remote
1172 1183
1173 1184 If base dict is specified, assume that these nodes and their parents
1174 1185 exist on the remote side.
1175 1186 If a list of heads is specified, return only nodes which are heads
1176 1187 or ancestors of these heads, and return a second element which
1177 1188 contains all remote heads which get new children.
1178 1189 """
1179 1190 if base == None:
1180 1191 base = {}
1181 1192 self.findincoming(remote, base, heads, force=force)
1182 1193
1183 1194 self.ui.debug(_("common changesets up to ")
1184 1195 + " ".join(map(short, base.keys())) + "\n")
1185 1196
1186 1197 remain = dict.fromkeys(self.changelog.nodemap)
1187 1198
1188 1199 # prune everything remote has from the tree
1189 1200 del remain[nullid]
1190 1201 remove = base.keys()
1191 1202 while remove:
1192 1203 n = remove.pop(0)
1193 1204 if n in remain:
1194 1205 del remain[n]
1195 1206 for p in self.changelog.parents(n):
1196 1207 remove.append(p)
1197 1208
1198 1209 # find every node whose parents have been pruned
1199 1210 subset = []
1200 1211 # find every remote head that will get new children
1201 1212 updated_heads = {}
1202 1213 for n in remain:
1203 1214 p1, p2 = self.changelog.parents(n)
1204 1215 if p1 not in remain and p2 not in remain:
1205 1216 subset.append(n)
1206 1217 if heads:
1207 1218 if p1 in heads:
1208 1219 updated_heads[p1] = True
1209 1220 if p2 in heads:
1210 1221 updated_heads[p2] = True
1211 1222
1212 1223 # this is the set of all roots we have to push
1213 1224 if heads:
1214 1225 return subset, updated_heads.keys()
1215 1226 else:
1216 1227 return subset
1217 1228
1218 1229 def pull(self, remote, heads=None, force=False, lock=None):
1219 1230 mylock = False
1220 1231 if not lock:
1221 1232 lock = self.lock()
1222 1233 mylock = True
1223 1234
1224 1235 try:
1225 1236 fetch = self.findincoming(remote, force=force)
1226 1237 if fetch == [nullid]:
1227 1238 self.ui.status(_("requesting all changes\n"))
1228 1239
1229 1240 if not fetch:
1230 1241 self.ui.status(_("no changes found\n"))
1231 1242 return 0
1232 1243
1233 1244 if heads is None:
1234 1245 cg = remote.changegroup(fetch, 'pull')
1235 1246 else:
1236 1247 cg = remote.changegroupsubset(fetch, heads, 'pull')
1237 1248 return self.addchangegroup(cg, 'pull', remote.url())
1238 1249 finally:
1239 1250 if mylock:
1240 1251 lock.release()
1241 1252
1242 1253 def push(self, remote, force=False, revs=None):
1243 1254 # there are two ways to push to remote repo:
1244 1255 #
1245 1256 # addchangegroup assumes local user can lock remote
1246 1257 # repo (local filesystem, old ssh servers).
1247 1258 #
1248 1259 # unbundle assumes local user cannot lock remote repo (new ssh
1249 1260 # servers, http servers).
1250 1261
1251 1262 if remote.capable('unbundle'):
1252 1263 return self.push_unbundle(remote, force, revs)
1253 1264 return self.push_addchangegroup(remote, force, revs)
1254 1265
1255 1266 def prepush(self, remote, force, revs):
1256 1267 base = {}
1257 1268 remote_heads = remote.heads()
1258 1269 inc = self.findincoming(remote, base, remote_heads, force=force)
1259 1270 if not force and inc:
1260 1271 self.ui.warn(_("abort: unsynced remote changes!\n"))
1261 1272 self.ui.status(_("(did you forget to sync?"
1262 1273 " use push -f to force)\n"))
1263 1274 return None, 1
1264 1275
1265 1276 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1266 1277 if revs is not None:
1267 1278 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1268 1279 else:
1269 1280 bases, heads = update, self.changelog.heads()
1270 1281
1271 1282 if not bases:
1272 1283 self.ui.status(_("no changes found\n"))
1273 1284 return None, 1
1274 1285 elif not force:
1275 1286 # FIXME we don't properly detect creation of new heads
1276 1287 # in the push -r case, assume the user knows what he's doing
1277 1288 if not revs and len(remote_heads) < len(heads) \
1278 1289 and remote_heads != [nullid]:
1279 1290 self.ui.warn(_("abort: push creates new remote branches!\n"))
1280 1291 self.ui.status(_("(did you forget to merge?"
1281 1292 " use push -f to force)\n"))
1282 1293 return None, 1
1283 1294
1284 1295 if revs is None:
1285 1296 cg = self.changegroup(update, 'push')
1286 1297 else:
1287 1298 cg = self.changegroupsubset(update, revs, 'push')
1288 1299 return cg, remote_heads
1289 1300
1290 1301 def push_addchangegroup(self, remote, force, revs):
1291 1302 lock = remote.lock()
1292 1303
1293 1304 ret = self.prepush(remote, force, revs)
1294 1305 if ret[0] is not None:
1295 1306 cg, remote_heads = ret
1296 1307 return remote.addchangegroup(cg, 'push', self.url())
1297 1308 return ret[1]
1298 1309
1299 1310 def push_unbundle(self, remote, force, revs):
1300 1311 # local repo finds heads on server, finds out what revs it
1301 1312 # must push. once revs transferred, if server finds it has
1302 1313 # different heads (someone else won commit/push race), server
1303 1314 # aborts.
1304 1315
1305 1316 ret = self.prepush(remote, force, revs)
1306 1317 if ret[0] is not None:
1307 1318 cg, remote_heads = ret
1308 1319 if force: remote_heads = ['force']
1309 1320 return remote.unbundle(cg, remote_heads, 'push')
1310 1321 return ret[1]
1311 1322
1312 1323 def changegroupsubset(self, bases, heads, source):
1313 1324 """This function generates a changegroup consisting of all the nodes
1314 1325 that are descendants of any of the bases, and ancestors of any of
1315 1326 the heads.
1316 1327
1317 1328 It is fairly complex as determining which filenodes and which
1318 1329 manifest nodes need to be included for the changeset to be complete
1319 1330 is non-trivial.
1320 1331
1321 1332 Another wrinkle is doing the reverse, figuring out which changeset in
1322 1333 the changegroup a particular filenode or manifestnode belongs to."""
1323 1334
1324 1335 self.hook('preoutgoing', throw=True, source=source)
1325 1336
1326 1337 # Set up some initial variables
1327 1338 # Make it easy to refer to self.changelog
1328 1339 cl = self.changelog
1329 1340 # msng is short for missing - compute the list of changesets in this
1330 1341 # changegroup.
1331 1342 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1332 1343 # Some bases may turn out to be superfluous, and some heads may be
1333 1344 # too. nodesbetween will return the minimal set of bases and heads
1334 1345 # necessary to re-create the changegroup.
1335 1346
1336 1347 # Known heads are the list of heads that it is assumed the recipient
1337 1348 # of this changegroup will know about.
1338 1349 knownheads = {}
1339 1350 # We assume that all parents of bases are known heads.
1340 1351 for n in bases:
1341 1352 for p in cl.parents(n):
1342 1353 if p != nullid:
1343 1354 knownheads[p] = 1
1344 1355 knownheads = knownheads.keys()
1345 1356 if knownheads:
1346 1357 # Now that we know what heads are known, we can compute which
1347 1358 # changesets are known. The recipient must know about all
1348 1359 # changesets required to reach the known heads from the null
1349 1360 # changeset.
1350 1361 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1351 1362 junk = None
1352 1363 # Transform the list into an ersatz set.
1353 1364 has_cl_set = dict.fromkeys(has_cl_set)
1354 1365 else:
1355 1366 # If there were no known heads, the recipient cannot be assumed to
1356 1367 # know about any changesets.
1357 1368 has_cl_set = {}
1358 1369
1359 1370 # Make it easy to refer to self.manifest
1360 1371 mnfst = self.manifest
1361 1372 # We don't know which manifests are missing yet
1362 1373 msng_mnfst_set = {}
1363 1374 # Nor do we know which filenodes are missing.
1364 1375 msng_filenode_set = {}
1365 1376
1366 1377 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1367 1378 junk = None
1368 1379
1369 1380 # A changeset always belongs to itself, so the changenode lookup
1370 1381 # function for a changenode is identity.
1371 1382 def identity(x):
1372 1383 return x
1373 1384
1374 1385 # A function generating function. Sets up an environment for the
1375 1386 # inner function.
1376 1387 def cmp_by_rev_func(revlog):
1377 1388 # Compare two nodes by their revision number in the environment's
1378 1389 # revision history. Since the revision number both represents the
1379 1390 # most efficient order to read the nodes in, and represents a
1380 1391 # topological sorting of the nodes, this function is often useful.
1381 1392 def cmp_by_rev(a, b):
1382 1393 return cmp(revlog.rev(a), revlog.rev(b))
1383 1394 return cmp_by_rev
1384 1395
1385 1396 # If we determine that a particular file or manifest node must be a
1386 1397 # node that the recipient of the changegroup will already have, we can
1387 1398 # also assume the recipient will have all the parents. This function
1388 1399 # prunes them from the set of missing nodes.
1389 1400 def prune_parents(revlog, hasset, msngset):
1390 1401 haslst = hasset.keys()
1391 1402 haslst.sort(cmp_by_rev_func(revlog))
1392 1403 for node in haslst:
1393 1404 parentlst = [p for p in revlog.parents(node) if p != nullid]
1394 1405 while parentlst:
1395 1406 n = parentlst.pop()
1396 1407 if n not in hasset:
1397 1408 hasset[n] = 1
1398 1409 p = [p for p in revlog.parents(n) if p != nullid]
1399 1410 parentlst.extend(p)
1400 1411 for n in hasset:
1401 1412 msngset.pop(n, None)
1402 1413
1403 1414 # This is a function generating function used to set up an environment
1404 1415 # for the inner function to execute in.
1405 1416 def manifest_and_file_collector(changedfileset):
1406 1417 # This is an information gathering function that gathers
1407 1418 # information from each changeset node that goes out as part of
1408 1419 # the changegroup. The information gathered is a list of which
1409 1420 # manifest nodes are potentially required (the recipient may
1410 1421 # already have them) and total list of all files which were
1411 1422 # changed in any changeset in the changegroup.
1412 1423 #
1413 1424 # We also remember the first changenode we saw any manifest
1414 1425 # referenced by so we can later determine which changenode 'owns'
1415 1426 # the manifest.
1416 1427 def collect_manifests_and_files(clnode):
1417 1428 c = cl.read(clnode)
1418 1429 for f in c[3]:
1419 1430 # This is to make sure we only have one instance of each
1420 1431 # filename string for each filename.
1421 1432 changedfileset.setdefault(f, f)
1422 1433 msng_mnfst_set.setdefault(c[0], clnode)
1423 1434 return collect_manifests_and_files
1424 1435
1425 1436 # Figure out which manifest nodes (of the ones we think might be part
1426 1437 # of the changegroup) the recipient must know about and remove them
1427 1438 # from the changegroup.
1428 1439 def prune_manifests():
1429 1440 has_mnfst_set = {}
1430 1441 for n in msng_mnfst_set:
1431 1442 # If a 'missing' manifest thinks it belongs to a changenode
1432 1443 # the recipient is assumed to have, obviously the recipient
1433 1444 # must have that manifest.
1434 1445 linknode = cl.node(mnfst.linkrev(n))
1435 1446 if linknode in has_cl_set:
1436 1447 has_mnfst_set[n] = 1
1437 1448 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1438 1449
1439 1450 # Use the information collected in collect_manifests_and_files to say
1440 1451 # which changenode any manifestnode belongs to.
1441 1452 def lookup_manifest_link(mnfstnode):
1442 1453 return msng_mnfst_set[mnfstnode]
1443 1454
1444 1455 # A function generating function that sets up the initial environment
1445 1456 # for the inner function.
1446 1457 def filenode_collector(changedfiles):
1447 1458 next_rev = [0]
1448 1459 # This gathers information from each manifestnode included in the
1449 1460 # changegroup about which filenodes the manifest node references
1450 1461 # so we can include those in the changegroup too.
1451 1462 #
1452 1463 # It also remembers which changenode each filenode belongs to. It
1453 1464 # does this by assuming that a filenode belongs to the changenode
1454 1465 # that the first manifest referencing it belongs to.
1455 1466 def collect_msng_filenodes(mnfstnode):
1456 1467 r = mnfst.rev(mnfstnode)
1457 1468 if r == next_rev[0]:
1458 1469 # If the last rev we looked at was the one just previous,
1459 1470 # we only need to see a diff.
1460 1471 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1461 1472 # For each line in the delta
1462 1473 for dline in delta.splitlines():
1463 1474 # get the filename and filenode for that line
1464 1475 f, fnode = dline.split('\0')
1465 1476 fnode = bin(fnode[:40])
1466 1477 f = changedfiles.get(f, None)
1467 1478 # And if the file is in the list of files we care
1468 1479 # about.
1469 1480 if f is not None:
1470 1481 # Get the changenode this manifest belongs to
1471 1482 clnode = msng_mnfst_set[mnfstnode]
1472 1483 # Create the set of filenodes for the file if
1473 1484 # there isn't one already.
1474 1485 ndset = msng_filenode_set.setdefault(f, {})
1475 1486 # And set the filenode's changelog node to the
1476 1487 # manifest's if it hasn't been set already.
1477 1488 ndset.setdefault(fnode, clnode)
1478 1489 else:
1479 1490 # Otherwise we need a full manifest.
1480 1491 m = mnfst.read(mnfstnode)
1481 1492 # For every file we care about.
1482 1493 for f in changedfiles:
1483 1494 fnode = m.get(f, None)
1484 1495 # If it's in the manifest
1485 1496 if fnode is not None:
1486 1497 # See comments above.
1487 1498 clnode = msng_mnfst_set[mnfstnode]
1488 1499 ndset = msng_filenode_set.setdefault(f, {})
1489 1500 ndset.setdefault(fnode, clnode)
1490 1501 # Remember the revision we hope to see next.
1491 1502 next_rev[0] = r + 1
1492 1503 return collect_msng_filenodes
1493 1504
1494 1505 # We have a list of filenodes we think we need for a file; let's remove
1495 1506 # all those we know the recipient must have.
1496 1507 def prune_filenodes(f, filerevlog):
1497 1508 msngset = msng_filenode_set[f]
1498 1509 hasset = {}
1499 1510 # If a 'missing' filenode thinks it belongs to a changenode we
1500 1511 # assume the recipient must have, then the recipient must have
1501 1512 # that filenode.
1502 1513 for n in msngset:
1503 1514 clnode = cl.node(filerevlog.linkrev(n))
1504 1515 if clnode in has_cl_set:
1505 1516 hasset[n] = 1
1506 1517 prune_parents(filerevlog, hasset, msngset)
1507 1518
1508 1519 # A function generating function that sets up a context for the
1509 1520 # inner function.
1510 1521 def lookup_filenode_link_func(fname):
1511 1522 msngset = msng_filenode_set[fname]
1512 1523 # Lookup the changenode the filenode belongs to.
1513 1524 def lookup_filenode_link(fnode):
1514 1525 return msngset[fnode]
1515 1526 return lookup_filenode_link
1516 1527
1517 1528 # Now that we have all these utility functions to help out and
1518 1529 # logically divide up the task, generate the group.
1519 1530 def gengroup():
1520 1531 # The set of changed files starts empty.
1521 1532 changedfiles = {}
1522 1533 # Create a changenode group generator that will call our functions
1523 1534 # back to lookup the owning changenode and collect information.
1524 1535 group = cl.group(msng_cl_lst, identity,
1525 1536 manifest_and_file_collector(changedfiles))
1526 1537 for chnk in group:
1527 1538 yield chnk
1528 1539
1529 1540 # The list of manifests has been collected by the generator
1530 1541 # calling our functions back.
1531 1542 prune_manifests()
1532 1543 msng_mnfst_lst = msng_mnfst_set.keys()
1533 1544 # Sort the manifestnodes by revision number.
1534 1545 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1535 1546 # Create a generator for the manifestnodes that calls our lookup
1536 1547 # and data collection functions back.
1537 1548 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1538 1549 filenode_collector(changedfiles))
1539 1550 for chnk in group:
1540 1551 yield chnk
1541 1552
1542 1553 # These are no longer needed, dereference and toss the memory for
1543 1554 # them.
1544 1555 msng_mnfst_lst = None
1545 1556 msng_mnfst_set.clear()
1546 1557
1547 1558 changedfiles = changedfiles.keys()
1548 1559 changedfiles.sort()
1549 1560 # Go through all our files in order sorted by name.
1550 1561 for fname in changedfiles:
1551 1562 filerevlog = self.file(fname)
1552 1563 # Toss out the filenodes that the recipient isn't really
1553 1564 # missing.
1554 1565 if msng_filenode_set.has_key(fname):
1555 1566 prune_filenodes(fname, filerevlog)
1556 1567 msng_filenode_lst = msng_filenode_set[fname].keys()
1557 1568 else:
1558 1569 msng_filenode_lst = []
1559 1570 # If any filenodes are left, generate the group for them,
1560 1571 # otherwise don't bother.
1561 1572 if len(msng_filenode_lst) > 0:
1562 1573 yield changegroup.genchunk(fname)
1563 1574 # Sort the filenodes by their revision #
1564 1575 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1565 1576 # Create a group generator and only pass in a changenode
1566 1577 # lookup function as we need to collect no information
1567 1578 # from filenodes.
1568 1579 group = filerevlog.group(msng_filenode_lst,
1569 1580 lookup_filenode_link_func(fname))
1570 1581 for chnk in group:
1571 1582 yield chnk
1572 1583 if msng_filenode_set.has_key(fname):
1573 1584 # Don't need this anymore, toss it to free memory.
1574 1585 del msng_filenode_set[fname]
1575 1586 # Signal that no more groups are left.
1576 1587 yield changegroup.closechunk()
1577 1588
1578 1589 if msng_cl_lst:
1579 1590 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1580 1591
1581 1592 return util.chunkbuffer(gengroup())
1582 1593
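
Read together with gengroup() above, the chunk stream handed to
util.chunkbuffer() has this overall shape (a descriptive sketch, not
additional protocol):

    # changelog chunks for every missing changeset
    # manifest chunks for every missing manifest
    # for each changed file, sorted by name:
    #     a chunk carrying the filename, then its filelog chunks
    # a final closechunk() terminating the file groups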
1583 1594 def changegroup(self, basenodes, source):
1584 1595 """Generate a changegroup of all nodes that we have that a recipient
1585 1596 doesn't.
1586 1597
1587 1598 This is much easier than the previous function as we can assume that
1588 1599 the recipient has any changenode we aren't sending them."""
1589 1600
1590 1601 self.hook('preoutgoing', throw=True, source=source)
1591 1602
1592 1603 cl = self.changelog
1593 1604 nodes = cl.nodesbetween(basenodes, None)[0]
1594 1605 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1595 1606
1596 1607 def identity(x):
1597 1608 return x
1598 1609
1599 1610 def gennodelst(revlog):
1600 1611 for r in xrange(0, revlog.count()):
1601 1612 n = revlog.node(r)
1602 1613 if revlog.linkrev(n) in revset:
1603 1614 yield n
1604 1615
1605 1616 def changed_file_collector(changedfileset):
1606 1617 def collect_changed_files(clnode):
1607 1618 c = cl.read(clnode)
1608 1619 for fname in c[3]:
1609 1620 changedfileset[fname] = 1
1610 1621 return collect_changed_files
1611 1622
1612 1623 def lookuprevlink_func(revlog):
1613 1624 def lookuprevlink(n):
1614 1625 return cl.node(revlog.linkrev(n))
1615 1626 return lookuprevlink
1616 1627
1617 1628 def gengroup():
1618 1629 # construct a list of all changed files
1619 1630 changedfiles = {}
1620 1631
1621 1632 for chnk in cl.group(nodes, identity,
1622 1633 changed_file_collector(changedfiles)):
1623 1634 yield chnk
1624 1635 changedfiles = changedfiles.keys()
1625 1636 changedfiles.sort()
1626 1637
1627 1638 mnfst = self.manifest
1628 1639 nodeiter = gennodelst(mnfst)
1629 1640 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1630 1641 yield chnk
1631 1642
1632 1643 for fname in changedfiles:
1633 1644 filerevlog = self.file(fname)
1634 1645 nodeiter = gennodelst(filerevlog)
1635 1646 nodeiter = list(nodeiter)
1636 1647 if nodeiter:
1637 1648 yield changegroup.genchunk(fname)
1638 1649 lookup = lookuprevlink_func(filerevlog)
1639 1650 for chnk in filerevlog.group(nodeiter, lookup):
1640 1651 yield chnk
1641 1652
1642 1653 yield changegroup.closechunk()
1643 1654
1644 1655 if nodes:
1645 1656 self.hook('outgoing', node=hex(nodes[0]), source=source)
1646 1657
1647 1658 return util.chunkbuffer(gengroup())
1648 1659
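# --- editor's illustrative sketch; not part of localrepo.py ---
# gennodelst above walks a revlog in revision order and keeps only
# the nodes whose linkrev (the changelog revision that introduced
# them) is in the outgoing set.  The same filter on plain data, with
# (node, linkrev) pairs standing in for a revlog:
def nodes_in_revset(entries, revset):
    for node, linkrev in entries:
        if linkrev in revset:
            yield node

# e.g. with entries [("a", 0), ("b", 2), ("c", 3)] and
# revset = dict.fromkeys([2, 3]) this yields "b" and "c": only nodes
# introduced by outgoing changesets are sent.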
1649 1660 def addchangegroup(self, source, srctype, url):
1650 1661 """add changegroup to repo.
1651 1662 returns number of heads modified or added + 1."""
1652 1663
1653 1664 def csmap(x):
1654 1665 self.ui.debug(_("add changeset %s\n") % short(x))
1655 1666 return cl.count()
1656 1667
1657 1668 def revmap(x):
1658 1669 return cl.rev(x)
1659 1670
1660 1671 if not source:
1661 1672 return 0
1662 1673
1663 1674 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1664 1675
1665 1676 changesets = files = revisions = 0
1666 1677
1667 1678 tr = self.transaction()
1668 1679
1669 1680         # write changelog data to temp files so concurrent readers
1670 1681         # will not see an inconsistent view
1671 1682 cl = None
1672 1683 try:
1673 1684 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1674 1685
1675 1686 oldheads = len(cl.heads())
1676 1687
1677 1688 # pull off the changeset group
1678 1689 self.ui.status(_("adding changesets\n"))
1679 1690 cor = cl.count() - 1
1680 1691 chunkiter = changegroup.chunkiter(source)
1681 1692 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1682 1693 raise util.Abort(_("received changelog group is empty"))
1683 1694 cnr = cl.count() - 1
1684 1695 changesets = cnr - cor
1685 1696
1686 1697 # pull off the manifest group
1687 1698 self.ui.status(_("adding manifests\n"))
1688 1699 chunkiter = changegroup.chunkiter(source)
1689 1700             # no need to check for an empty manifest group here:
1690 1701             # if the result of merging changesets 1 and 2 is the same in
1691 1702             # changesets 3 and 4, no new manifest node is created and the
1692 1703             # manifest group will be empty during the pull
1693 1704 self.manifest.addgroup(chunkiter, revmap, tr)
1694 1705
1695 1706 # process the files
1696 1707 self.ui.status(_("adding file changes\n"))
1697 1708 while 1:
1698 1709 f = changegroup.getchunk(source)
1699 1710 if not f:
1700 1711 break
1701 1712 self.ui.debug(_("adding %s revisions\n") % f)
1702 1713 fl = self.file(f)
1703 1714 o = fl.count()
1704 1715 chunkiter = changegroup.chunkiter(source)
1705 1716 if fl.addgroup(chunkiter, revmap, tr) is None:
1706 1717 raise util.Abort(_("received file revlog group is empty"))
1707 1718 revisions += fl.count() - o
1708 1719 files += 1
1709 1720
1710 1721 cl.writedata()
1711 1722 finally:
1712 1723 if cl:
1713 1724 cl.cleanup()
1714 1725
1715 1726 # make changelog see real files again
1716 1727 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1717 1728 self.changelog.checkinlinesize(tr)
1718 1729
1719 1730 newheads = len(self.changelog.heads())
1720 1731 heads = ""
1721 1732 if oldheads and newheads != oldheads:
1722 1733 heads = _(" (%+d heads)") % (newheads - oldheads)
1723 1734
1724 1735 self.ui.status(_("added %d changesets"
1725 1736 " with %d changes to %d files%s\n")
1726 1737 % (changesets, revisions, files, heads))
1727 1738
1728 1739 if changesets > 0:
1729 1740 self.hook('pretxnchangegroup', throw=True,
1730 1741 node=hex(self.changelog.node(cor+1)), source=srctype,
1731 1742 url=url)
1732 1743
1733 1744 tr.close()
1734 1745
1735 1746 if changesets > 0:
1736 1747 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1737 1748 source=srctype, url=url)
1738 1749
1739 1750 for i in range(cor + 1, cnr + 1):
1740 1751 self.hook("incoming", node=hex(self.changelog.node(i)),
1741 1752 source=srctype, url=url)
1742 1753
1743 1754 return newheads - oldheads + 1
1744 1755
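# --- editor's illustrative sketch; not part of localrepo.py ---
# The consuming side of the framing sketched earlier, as
# changegroup.getchunk and changegroup.chunkiter are assumed to
# behave in addchangegroup above: read the self-inclusive 4-byte
# length, return the payload, and report an empty string when a
# zero-length chunk ends the current group.
import struct

def getchunk_sketch(source):
    d = source.read(4)
    if not d:
        return ""
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        return ""                 # empty chunk: group is finished
    return source.read(l - 4)

def chunkiter_sketch(source):
    # yield payloads until the group's terminating empty chunk
    while 1:
        c = getchunk_sketch(source)
        if not c:
            break
        yield c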
1745 1756
1746 1757 def stream_in(self, remote):
1747 1758 fp = remote.stream_out()
1748 1759 resp = int(fp.readline())
1749 1760 if resp != 0:
1750 1761 raise util.Abort(_('operation forbidden by server'))
1751 1762 self.ui.status(_('streaming all changes\n'))
1752 1763 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1753 1764 self.ui.status(_('%d files to transfer, %s of data\n') %
1754 1765 (total_files, util.bytecount(total_bytes)))
1755 1766 start = time.time()
1756 1767 for i in xrange(total_files):
1757 1768 name, size = fp.readline().split('\0', 1)
1758 1769 size = int(size)
1759 1770 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1760 1771 ofp = self.opener(name, 'w')
1761 1772 for chunk in util.filechunkiter(fp, limit=size):
1762 1773 ofp.write(chunk)
1763 1774 ofp.close()
1764 1775 elapsed = time.time() - start
1765 1776 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1766 1777 (util.bytecount(total_bytes), elapsed,
1767 1778 util.bytecount(total_bytes / elapsed)))
1768 1779 self.reload()
1769 1780 return len(self.heads()) + 1
1770 1781
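# --- editor's illustrative sketch; not part of localrepo.py ---
# The wire format stream_in above expects, seen from a hypothetical
# server-side emitter: a numeric status line (0 = request allowed),
# a "<files> <bytes>" summary line, then per file a "name\0size"
# header line followed by exactly size bytes of raw revlog data.
def stream_out_sketch(fp, entries):
    # entries: list of (name, data) pairs; fp: writable stream
    fp.write("0\n")
    total = 0
    for name, data in entries:
        total += len(data)
    fp.write("%d %d\n" % (len(entries), total))
    for name, data in entries:
        fp.write("%s\0%d\n" % (name, len(data)))
        fp.write(data)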
1771 1782 def clone(self, remote, heads=[], stream=False):
1772 1783 '''clone remote repository.
1773 1784
1774 1785 keyword arguments:
1775 1786 heads: list of revs to clone (forces use of pull)
1776 1787 stream: use streaming clone if possible'''
1777 1788
1778 1789         # at present, all clients that can request uncompressed
1779 1790         # clones can read the repo formats supported by all servers
1780 1791         # that can serve them.
1781 1792
1782 1793 # if revlog format changes, client will have to check version
1783 1794 # and format flags on "stream" capability, and use
1784 1795 # uncompressed only if compatible.
1785 1796
1786 1797 if stream and not heads and remote.capable('stream'):
1787 1798 return self.stream_in(remote)
1788 1799 return self.pull(remote, heads)
1789 1800
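# --- editor's illustrative usage sketch; not part of localrepo.py ---
# clone takes the streaming path only when no specific heads were
# requested and the server advertises the 'stream' capability;
# anything else degrades to a normal pull.  'remote' here is any peer
# object offering capable() and stream_out() (assumed names):
#
#   count = repo.clone(remote, stream=True)       # streams if possible
#   count = repo.clone(remote, heads=[somenode])  # always pulls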
1790 1801 # used to avoid circular references so destructors work
1791 1802 def aftertrans(base):
1792 1803 p = base
1793 1804 def a():
1794 1805 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1795 1806 util.rename(os.path.join(p, "journal.dirstate"),
1796 1807 os.path.join(p, "undo.dirstate"))
1797 1808 return a
1798 1809
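# --- editor's illustrative sketch; not part of localrepo.py ---
# aftertrans returns a closure over the bare path string rather than
# a bound repository method, so the transaction object holds no
# reference back to the repository and __del__ can still fire.
# Presumed wiring when a transaction is opened (the exact
# transaction.transaction signature is an assumption):
#
#   tr = transaction.transaction(self.ui.warn, self.opener,
#                                self.join("journal"),
#                                aftertrans(self.path))
#
# On close, the callback renames journal/journal.dirstate to
# undo/undo.dirstate, which is what makes rollback possible.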
1799 1810 def instance(ui, path, create):
1800 1811 return localrepository(ui, util.drop_scheme('file', path), create)
1801 1812
1802 1813 def islocal(path):
1803 1814 return True