Create the parent directory when checking out symlinks.
Giorgos Keramidas
r4137:26596a6b default
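
The change is in localrepo.wwrite(): when checking out a symlink, the parent directory of the destination path is now created before os.symlink() is called, so a checkout no longer fails when the link lives in a directory that does not exist yet. A minimal sketch of the new behaviour follows; write_symlink and its arguments are hypothetical names for illustration, not part of the patch, and a POSIX platform with os.symlink() is assumed:

    import os

    def write_symlink(path, target):
        # Drop any existing file or stale link at the destination;
        # a checkout may be replacing an older entry.
        try:
            os.unlink(path)
        except OSError:
            pass
        # The fix: make sure the parent directory exists before the
        # link is created, mirroring the os.makedirs() call added
        # to wwrite() in the hunk below.
        d = os.path.dirname(path)
        if not os.path.exists(d):
            os.makedirs(d)
        os.symlink(target, path)

In the patch itself the destination is self.wjoin(filename) and the link target is the filtered file data.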
@@ -1,1922 +1,1926 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, appendfile, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 if not path:
24 24 p = os.getcwd()
25 25 while not os.path.isdir(os.path.join(p, ".hg")):
26 26 oldp = p
27 27 p = os.path.dirname(p)
28 28 if p == oldp:
29 29 raise repo.RepoError(_("There is no Mercurial repository"
30 30 " here (.hg not found)"))
31 31 path = p
32 32
33 33 self.path = os.path.join(path, ".hg")
34 34 self.root = os.path.realpath(path)
35 35 self.origroot = path
36 36 self.opener = util.opener(self.path)
37 37 self.wopener = util.opener(self.root)
38 38
39 39 if not os.path.isdir(self.path):
40 40 if create:
41 41 if not os.path.exists(path):
42 42 os.mkdir(path)
43 43 os.mkdir(self.path)
44 44 os.mkdir(os.path.join(self.path, "store"))
45 45 requirements = ("revlogv1", "store")
46 46 reqfile = self.opener("requires", "w")
47 47 for r in requirements:
48 48 reqfile.write("%s\n" % r)
49 49 reqfile.close()
50 50 # create an invalid changelog
51 51 self.opener("00changelog.i", "a").write(
52 52 '\0\0\0\2' # represents revlogv2
53 53 ' dummy changelog to prevent using the old repo layout'
54 54 )
55 55 else:
56 56 raise repo.RepoError(_("repository %s not found") % path)
57 57 elif create:
58 58 raise repo.RepoError(_("repository %s already exists") % path)
59 59 else:
60 60 # find requirements
61 61 try:
62 62 requirements = self.opener("requires").read().splitlines()
63 63 except IOError, inst:
64 64 if inst.errno != errno.ENOENT:
65 65 raise
66 66 requirements = []
67 67 # check them
68 68 for r in requirements:
69 69 if r not in self.supported:
70 70 raise repo.RepoError(_("requirement '%s' not supported") % r)
71 71
72 72 # setup store
73 73 if "store" in requirements:
74 74 self.encodefn = util.encodefilename
75 75 self.decodefn = util.decodefilename
76 76 self.spath = os.path.join(self.path, "store")
77 77 else:
78 78 self.encodefn = lambda x: x
79 79 self.decodefn = lambda x: x
80 80 self.spath = self.path
81 81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
82 82
83 83 self.ui = ui.ui(parentui=parentui)
84 84 try:
85 85 self.ui.readconfig(self.join("hgrc"), self.root)
86 86 except IOError:
87 87 pass
88 88
89 89 v = self.ui.configrevlog()
90 90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
91 91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
92 92 fl = v.get('flags', None)
93 93 flags = 0
94 94 if fl != None:
95 95 for x in fl.split():
96 96 flags |= revlog.flagstr(x)
97 97 elif self.revlogv1:
98 98 flags = revlog.REVLOG_DEFAULT_FLAGS
99 99
100 100 v = self.revlogversion | flags
101 101 self.manifest = manifest.manifest(self.sopener, v)
102 102 self.changelog = changelog.changelog(self.sopener, v)
103 103
104 104 fallback = self.ui.config('ui', 'fallbackencoding')
105 105 if fallback:
106 106 util._fallbackencoding = fallback
107 107
108 108 # the changelog might not have the inline index flag
109 109 # on. If the format of the changelog is the same as found in
110 110 # .hgrc, apply any flags found in the .hgrc as well.
111 111 # Otherwise, just version from the changelog
112 112 v = self.changelog.version
113 113 if v == self.revlogversion:
114 114 v |= flags
115 115 self.revlogversion = v
116 116
117 117 self.tagscache = None
118 118 self.branchcache = None
119 119 self.nodetagscache = None
120 120 self.filterpats = {}
121 121 self.transhandle = None
122 122
123 123 self._link = lambda x: False
124 124 if util.checklink(self.root):
125 125 r = self.root # avoid circular reference in lambda
126 126 self._link = lambda x: util.is_link(os.path.join(r, x))
127 127
128 128 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129 129
130 130 def url(self):
131 131 return 'file:' + self.root
132 132
133 133 def hook(self, name, throw=False, **args):
134 134 def callhook(hname, funcname):
135 135 '''call python hook. hook is callable object, looked up as
136 136 name in python module. if callable returns "true", hook
137 137 fails, else passes. if hook raises exception, treated as
138 138 hook failure. exception propagates if throw is "true".
139 139
140 140 reason for "true" meaning "hook failed" is so that
141 141 unmodified commands (e.g. mercurial.commands.update) can
142 142 be run as hooks without wrappers to convert return values.'''
143 143
144 144 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
145 145 obj = funcname
146 146 if not callable(obj):
147 147 d = funcname.rfind('.')
148 148 if d == -1:
149 149 raise util.Abort(_('%s hook is invalid ("%s" not in '
150 150 'a module)') % (hname, funcname))
151 151 modname = funcname[:d]
152 152 try:
153 153 obj = __import__(modname)
154 154 except ImportError:
155 155 try:
156 156 # extensions are loaded with hgext_ prefix
157 157 obj = __import__("hgext_%s" % modname)
158 158 except ImportError:
159 159 raise util.Abort(_('%s hook is invalid '
160 160 '(import of "%s" failed)') %
161 161 (hname, modname))
162 162 try:
163 163 for p in funcname.split('.')[1:]:
164 164 obj = getattr(obj, p)
165 165 except AttributeError, err:
166 166 raise util.Abort(_('%s hook is invalid '
167 167 '("%s" is not defined)') %
168 168 (hname, funcname))
169 169 if not callable(obj):
170 170 raise util.Abort(_('%s hook is invalid '
171 171 '("%s" is not callable)') %
172 172 (hname, funcname))
173 173 try:
174 174 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
175 175 except (KeyboardInterrupt, util.SignalInterrupt):
176 176 raise
177 177 except Exception, exc:
178 178 if isinstance(exc, util.Abort):
179 179 self.ui.warn(_('error: %s hook failed: %s\n') %
180 180 (hname, exc.args[0]))
181 181 else:
182 182 self.ui.warn(_('error: %s hook raised an exception: '
183 183 '%s\n') % (hname, exc))
184 184 if throw:
185 185 raise
186 186 self.ui.print_exc()
187 187 return True
188 188 if r:
189 189 if throw:
190 190 raise util.Abort(_('%s hook failed') % hname)
191 191 self.ui.warn(_('warning: %s hook failed\n') % hname)
192 192 return r
193 193
194 194 def runhook(name, cmd):
195 195 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
196 196 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
197 197 r = util.system(cmd, environ=env, cwd=self.root)
198 198 if r:
199 199 desc, r = util.explain_exit(r)
200 200 if throw:
201 201 raise util.Abort(_('%s hook %s') % (name, desc))
202 202 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
203 203 return r
204 204
205 205 r = False
206 206 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
207 207 if hname.split(".", 1)[0] == name and cmd]
208 208 hooks.sort()
209 209 for hname, cmd in hooks:
210 210 if callable(cmd):
211 211 r = callhook(hname, cmd) or r
212 212 elif cmd.startswith('python:'):
213 213 r = callhook(hname, cmd[7:].strip()) or r
214 214 else:
215 215 r = runhook(hname, cmd) or r
216 216 return r
217 217
218 218 tag_disallowed = ':\r\n'
219 219
220 220 def _tag(self, name, node, message, local, user, date, parent=None):
221 221 use_dirstate = parent is None
222 222
223 223 for c in self.tag_disallowed:
224 224 if c in name:
225 225 raise util.Abort(_('%r cannot be used in a tag name') % c)
226 226
227 227 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
228 228
229 229 if local:
230 230 # local tags are stored in the current charset
231 231 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
232 232 self.hook('tag', node=hex(node), tag=name, local=local)
233 233 return
234 234
235 235 # committed tags are stored in UTF-8
236 236 line = '%s %s\n' % (hex(node), util.fromlocal(name))
237 237 if use_dirstate:
238 238 self.wfile('.hgtags', 'ab').write(line)
239 239 else:
240 240 ntags = self.filectx('.hgtags', parent).data()
241 241 self.wfile('.hgtags', 'ab').write(ntags + line)
242 242 if use_dirstate and self.dirstate.state('.hgtags') == '?':
243 243 self.add(['.hgtags'])
244 244
245 245 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
246 246
247 247 self.hook('tag', node=hex(node), tag=name, local=local)
248 248
249 249 return tagnode
250 250
251 251 def tag(self, name, node, message, local, user, date):
252 252 '''tag a revision with a symbolic name.
253 253
254 254 if local is True, the tag is stored in a per-repository file.
255 255 otherwise, it is stored in the .hgtags file, and a new
256 256 changeset is committed with the change.
257 257
258 258 keyword arguments:
259 259
260 260 local: whether to store tag in non-version-controlled file
261 261 (default False)
262 262
263 263 message: commit message to use if committing
264 264
265 265 user: name of user to use if committing
266 266
267 267 date: date tuple to use if committing'''
268 268
269 269 for x in self.status()[:5]:
270 270 if '.hgtags' in x:
271 271 raise util.Abort(_('working copy of .hgtags is changed '
272 272 '(please commit .hgtags manually)'))
273 273
274 274
275 275 self._tag(name, node, message, local, user, date)
276 276
277 277 def tags(self):
278 278 '''return a mapping of tag to node'''
279 279 if not self.tagscache:
280 280 self.tagscache = {}
281 281
282 282 def parsetag(line, context):
283 283 if not line:
284 284 return
285 285 s = l.split(" ", 1)
286 286 if len(s) != 2:
287 287 self.ui.warn(_("%s: cannot parse entry\n") % context)
288 288 return
289 289 node, key = s
290 290 key = util.tolocal(key.strip()) # stored in UTF-8
291 291 try:
292 292 bin_n = bin(node)
293 293 except TypeError:
294 294 self.ui.warn(_("%s: node '%s' is not well formed\n") %
295 295 (context, node))
296 296 return
297 297 if bin_n not in self.changelog.nodemap:
298 298 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
299 299 (context, key))
300 300 return
301 301 self.tagscache[key] = bin_n
302 302
303 303 # read the tags file from each head, ending with the tip,
304 304 # and add each tag found to the map, with "newer" ones
305 305 # taking precedence
306 306 f = None
307 307 for rev, node, fnode in self._hgtagsnodes():
308 308 f = (f and f.filectx(fnode) or
309 309 self.filectx('.hgtags', fileid=fnode))
310 310 count = 0
311 311 for l in f.data().splitlines():
312 312 count += 1
313 313 parsetag(l, _("%s, line %d") % (str(f), count))
314 314
315 315 try:
316 316 f = self.opener("localtags")
317 317 count = 0
318 318 for l in f:
319 319 # localtags are stored in the local character set
320 320 # while the internal tag table is stored in UTF-8
321 321 l = util.fromlocal(l)
322 322 count += 1
323 323 parsetag(l, _("localtags, line %d") % count)
324 324 except IOError:
325 325 pass
326 326
327 327 self.tagscache['tip'] = self.changelog.tip()
328 328
329 329 return self.tagscache
330 330
331 331 def _hgtagsnodes(self):
332 332 heads = self.heads()
333 333 heads.reverse()
334 334 last = {}
335 335 ret = []
336 336 for node in heads:
337 337 c = self.changectx(node)
338 338 rev = c.rev()
339 339 try:
340 340 fnode = c.filenode('.hgtags')
341 341 except revlog.LookupError:
342 342 continue
343 343 ret.append((rev, node, fnode))
344 344 if fnode in last:
345 345 ret[last[fnode]] = None
346 346 last[fnode] = len(ret) - 1
347 347 return [item for item in ret if item]
348 348
349 349 def tagslist(self):
350 350 '''return a list of tags ordered by revision'''
351 351 l = []
352 352 for t, n in self.tags().items():
353 353 try:
354 354 r = self.changelog.rev(n)
355 355 except:
356 356 r = -2 # sort to the beginning of the list if unknown
357 357 l.append((r, t, n))
358 358 l.sort()
359 359 return [(t, n) for r, t, n in l]
360 360
361 361 def nodetags(self, node):
362 362 '''return the tags associated with a node'''
363 363 if not self.nodetagscache:
364 364 self.nodetagscache = {}
365 365 for t, n in self.tags().items():
366 366 self.nodetagscache.setdefault(n, []).append(t)
367 367 return self.nodetagscache.get(node, [])
368 368
369 369 def _branchtags(self):
370 370 partial, last, lrev = self._readbranchcache()
371 371
372 372 tiprev = self.changelog.count() - 1
373 373 if lrev != tiprev:
374 374 self._updatebranchcache(partial, lrev+1, tiprev+1)
375 375 self._writebranchcache(partial, self.changelog.tip(), tiprev)
376 376
377 377 return partial
378 378
379 379 def branchtags(self):
380 380 if self.branchcache is not None:
381 381 return self.branchcache
382 382
383 383 self.branchcache = {} # avoid recursion in changectx
384 384 partial = self._branchtags()
385 385
386 386 # the branch cache is stored on disk as UTF-8, but in the local
387 387 # charset internally
388 388 for k, v in partial.items():
389 389 self.branchcache[util.tolocal(k)] = v
390 390 return self.branchcache
391 391
392 392 def _readbranchcache(self):
393 393 partial = {}
394 394 try:
395 395 f = self.opener("branches.cache")
396 396 lines = f.read().split('\n')
397 397 f.close()
398 398 last, lrev = lines.pop(0).rstrip().split(" ", 1)
399 399 last, lrev = bin(last), int(lrev)
400 400 if not (lrev < self.changelog.count() and
401 401 self.changelog.node(lrev) == last): # sanity check
402 402 # invalidate the cache
403 403 raise ValueError('Invalid branch cache: unknown tip')
404 404 for l in lines:
405 405 if not l: continue
406 406 node, label = l.rstrip().split(" ", 1)
407 407 partial[label] = bin(node)
408 408 except (KeyboardInterrupt, util.SignalInterrupt):
409 409 raise
410 410 except Exception, inst:
411 411 if self.ui.debugflag:
412 412 self.ui.warn(str(inst), '\n')
413 413 partial, last, lrev = {}, nullid, nullrev
414 414 return partial, last, lrev
415 415
416 416 def _writebranchcache(self, branches, tip, tiprev):
417 417 try:
418 418 f = self.opener("branches.cache", "w")
419 419 f.write("%s %s\n" % (hex(tip), tiprev))
420 420 for label, node in branches.iteritems():
421 421 f.write("%s %s\n" % (hex(node), label))
422 422 except IOError:
423 423 pass
424 424
425 425 def _updatebranchcache(self, partial, start, end):
426 426 for r in xrange(start, end):
427 427 c = self.changectx(r)
428 428 b = c.branch()
429 429 if b:
430 430 partial[b] = c.node()
431 431
432 432 def lookup(self, key):
433 433 if key == '.':
434 434 key = self.dirstate.parents()[0]
435 435 if key == nullid:
436 436 raise repo.RepoError(_("no revision checked out"))
437 437 elif key == 'null':
438 438 return nullid
439 439 n = self.changelog._match(key)
440 440 if n:
441 441 return n
442 442 if key in self.tags():
443 443 return self.tags()[key]
444 444 if key in self.branchtags():
445 445 return self.branchtags()[key]
446 446 n = self.changelog._partialmatch(key)
447 447 if n:
448 448 return n
449 449 raise repo.RepoError(_("unknown revision '%s'") % key)
450 450
451 451 def dev(self):
452 452 return os.lstat(self.path).st_dev
453 453
454 454 def local(self):
455 455 return True
456 456
457 457 def join(self, f):
458 458 return os.path.join(self.path, f)
459 459
460 460 def sjoin(self, f):
461 461 f = self.encodefn(f)
462 462 return os.path.join(self.spath, f)
463 463
464 464 def wjoin(self, f):
465 465 return os.path.join(self.root, f)
466 466
467 467 def file(self, f):
468 468 if f[0] == '/':
469 469 f = f[1:]
470 470 return filelog.filelog(self.sopener, f, self.revlogversion)
471 471
472 472 def changectx(self, changeid=None):
473 473 return context.changectx(self, changeid)
474 474
475 475 def workingctx(self):
476 476 return context.workingctx(self)
477 477
478 478 def parents(self, changeid=None):
479 479 '''
480 480 get list of changectxs for parents of changeid or working directory
481 481 '''
482 482 if changeid is None:
483 483 pl = self.dirstate.parents()
484 484 else:
485 485 n = self.changelog.lookup(changeid)
486 486 pl = self.changelog.parents(n)
487 487 if pl[1] == nullid:
488 488 return [self.changectx(pl[0])]
489 489 return [self.changectx(pl[0]), self.changectx(pl[1])]
490 490
491 491 def filectx(self, path, changeid=None, fileid=None):
492 492 """changeid can be a changeset revision, node, or tag.
493 493 fileid can be a file revision or node."""
494 494 return context.filectx(self, path, changeid, fileid)
495 495
496 496 def getcwd(self):
497 497 return self.dirstate.getcwd()
498 498
499 499 def wfile(self, f, mode='r'):
500 500 return self.wopener(f, mode)
501 501
502 502 def _filter(self, filter, filename, data):
503 503 if filter not in self.filterpats:
504 504 l = []
505 505 for pat, cmd in self.ui.configitems(filter):
506 506 mf = util.matcher(self.root, "", [pat], [], [])[1]
507 507 l.append((mf, cmd))
508 508 self.filterpats[filter] = l
509 509
510 510 for mf, cmd in self.filterpats[filter]:
511 511 if mf(filename):
512 512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
513 513 data = util.filter(data, cmd)
514 514 break
515 515
516 516 return data
517 517
518 518 def wread(self, filename):
519 519 if self._link(filename):
520 520 data = os.readlink(self.wjoin(filename))
521 521 else:
522 522 data = self.wopener(filename, 'r').read()
523 523 return self._filter("encode", filename, data)
524 524
525 525 def wwrite(self, filename, data, flags):
526 526 data = self._filter("decode", filename, data)
527 527 if "l" in flags:
528 f = self.wjoin(filename)
528 529 try:
529 os.unlink(self.wjoin(filename))
530 os.unlink(f)
530 531 except OSError:
531 532 pass
532 os.symlink(data, self.wjoin(filename))
533 d = os.path.dirname(f)
534 if not os.path.exists(d):
535 os.makedirs(d)
536 os.symlink(data, f)
533 537 else:
534 538 try:
535 539 if self._link(filename):
536 540 os.unlink(self.wjoin(filename))
537 541 except OSError:
538 542 pass
539 543 self.wopener(filename, 'w').write(data)
540 544 util.set_exec(self.wjoin(filename), "x" in flags)
541 545
542 546 def wwritedata(self, filename, data):
543 547 return self._filter("decode", filename, data)
544 548
545 549 def transaction(self):
546 550 tr = self.transhandle
547 551 if tr != None and tr.running():
548 552 return tr.nest()
549 553
550 554 # save dirstate for rollback
551 555 try:
552 556 ds = self.opener("dirstate").read()
553 557 except IOError:
554 558 ds = ""
555 559 self.opener("journal.dirstate", "w").write(ds)
556 560
557 561 renames = [(self.sjoin("journal"), self.sjoin("undo")),
558 562 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
559 563 tr = transaction.transaction(self.ui.warn, self.sopener,
560 564 self.sjoin("journal"),
561 565 aftertrans(renames))
562 566 self.transhandle = tr
563 567 return tr
564 568
565 569 def recover(self):
566 570 l = self.lock()
567 571 if os.path.exists(self.sjoin("journal")):
568 572 self.ui.status(_("rolling back interrupted transaction\n"))
569 573 transaction.rollback(self.sopener, self.sjoin("journal"))
570 574 self.reload()
571 575 return True
572 576 else:
573 577 self.ui.warn(_("no interrupted transaction available\n"))
574 578 return False
575 579
576 580 def rollback(self, wlock=None):
577 581 if not wlock:
578 582 wlock = self.wlock()
579 583 l = self.lock()
580 584 if os.path.exists(self.sjoin("undo")):
581 585 self.ui.status(_("rolling back last transaction\n"))
582 586 transaction.rollback(self.sopener, self.sjoin("undo"))
583 587 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
584 588 self.reload()
585 589 self.wreload()
586 590 else:
587 591 self.ui.warn(_("no rollback information available\n"))
588 592
589 593 def wreload(self):
590 594 self.dirstate.read()
591 595
592 596 def reload(self):
593 597 self.changelog.load()
594 598 self.manifest.load()
595 599 self.tagscache = None
596 600 self.nodetagscache = None
597 601
598 602 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
599 603 desc=None):
600 604 try:
601 605 l = lock.lock(lockname, 0, releasefn, desc=desc)
602 606 except lock.LockHeld, inst:
603 607 if not wait:
604 608 raise
605 609 self.ui.warn(_("waiting for lock on %s held by %r\n") %
606 610 (desc, inst.locker))
607 611 # default to 600 seconds timeout
608 612 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
609 613 releasefn, desc=desc)
610 614 if acquirefn:
611 615 acquirefn()
612 616 return l
613 617
614 618 def lock(self, wait=1):
615 619 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
616 620 desc=_('repository %s') % self.origroot)
617 621
618 622 def wlock(self, wait=1):
619 623 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
620 624 self.wreload,
621 625 desc=_('working directory of %s') % self.origroot)
622 626
623 627 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
624 628 """
625 629 commit an individual file as part of a larger transaction
626 630 """
627 631
628 632 t = self.wread(fn)
629 633 fl = self.file(fn)
630 634 fp1 = manifest1.get(fn, nullid)
631 635 fp2 = manifest2.get(fn, nullid)
632 636
633 637 meta = {}
634 638 cp = self.dirstate.copied(fn)
635 639 if cp:
636 640 # Mark the new revision of this file as a copy of another
637 641 # file. This copy data will effectively act as a parent
638 642 # of this new revision. If this is a merge, the first
639 643 # parent will be the nullid (meaning "look up the copy data")
640 644 # and the second one will be the other parent. For example:
641 645 #
642 646 # 0 --- 1 --- 3 rev1 changes file foo
643 647 # \ / rev2 renames foo to bar and changes it
644 648 # \- 2 -/ rev3 should have bar with all changes and
645 649 # should record that bar descends from
646 650 # bar in rev2 and foo in rev1
647 651 #
648 652 # this allows this merge to succeed:
649 653 #
650 654 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
651 655 # \ / merging rev3 and rev4 should use bar@rev2
652 656 # \- 2 --- 4 as the merge base
653 657 #
654 658 meta["copy"] = cp
655 659 if not manifest2: # not a branch merge
656 660 meta["copyrev"] = hex(manifest1.get(cp, nullid))
657 661 fp2 = nullid
658 662 elif fp2 != nullid: # copied on remote side
659 663 meta["copyrev"] = hex(manifest1.get(cp, nullid))
660 664 elif fp1 != nullid: # copied on local side, reversed
661 665 meta["copyrev"] = hex(manifest2.get(cp))
662 666 fp2 = fp1
663 667 else: # directory rename
664 668 meta["copyrev"] = hex(manifest1.get(cp, nullid))
665 669 self.ui.debug(_(" %s: copy %s:%s\n") %
666 670 (fn, cp, meta["copyrev"]))
667 671 fp1 = nullid
668 672 elif fp2 != nullid:
669 673 # is one parent an ancestor of the other?
670 674 fpa = fl.ancestor(fp1, fp2)
671 675 if fpa == fp1:
672 676 fp1, fp2 = fp2, nullid
673 677 elif fpa == fp2:
674 678 fp2 = nullid
675 679
676 680 # is the file unmodified from the parent? report existing entry
677 681 if fp2 == nullid and not fl.cmp(fp1, t):
678 682 return fp1
679 683
680 684 changelist.append(fn)
681 685 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
682 686
683 687 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
684 688 if p1 is None:
685 689 p1, p2 = self.dirstate.parents()
686 690 return self.commit(files=files, text=text, user=user, date=date,
687 691 p1=p1, p2=p2, wlock=wlock, extra=extra)
688 692
689 693 def commit(self, files=None, text="", user=None, date=None,
690 694 match=util.always, force=False, lock=None, wlock=None,
691 695 force_editor=False, p1=None, p2=None, extra={}):
692 696
693 697 commit = []
694 698 remove = []
695 699 changed = []
696 700 use_dirstate = (p1 is None) # not rawcommit
697 701 extra = extra.copy()
698 702
699 703 if use_dirstate:
700 704 if files:
701 705 for f in files:
702 706 s = self.dirstate.state(f)
703 707 if s in 'nmai':
704 708 commit.append(f)
705 709 elif s == 'r':
706 710 remove.append(f)
707 711 else:
708 712 self.ui.warn(_("%s not tracked!\n") % f)
709 713 else:
710 714 changes = self.status(match=match)[:5]
711 715 modified, added, removed, deleted, unknown = changes
712 716 commit = modified + added
713 717 remove = removed
714 718 else:
715 719 commit = files
716 720
717 721 if use_dirstate:
718 722 p1, p2 = self.dirstate.parents()
719 723 update_dirstate = True
720 724 else:
721 725 p1, p2 = p1, p2 or nullid
722 726 update_dirstate = (self.dirstate.parents()[0] == p1)
723 727
724 728 c1 = self.changelog.read(p1)
725 729 c2 = self.changelog.read(p2)
726 730 m1 = self.manifest.read(c1[0]).copy()
727 731 m2 = self.manifest.read(c2[0])
728 732
729 733 if use_dirstate:
730 734 branchname = self.workingctx().branch()
731 735 try:
732 736 branchname = branchname.decode('UTF-8').encode('UTF-8')
733 737 except UnicodeDecodeError:
734 738 raise util.Abort(_('branch name not in UTF-8!'))
735 739 else:
736 740 branchname = ""
737 741
738 742 if use_dirstate:
739 743 oldname = c1[5].get("branch", "") # stored in UTF-8
740 744 if not commit and not remove and not force and p2 == nullid and \
741 745 branchname == oldname:
742 746 self.ui.status(_("nothing changed\n"))
743 747 return None
744 748
745 749 xp1 = hex(p1)
746 750 if p2 == nullid: xp2 = ''
747 751 else: xp2 = hex(p2)
748 752
749 753 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
750 754
751 755 if not wlock:
752 756 wlock = self.wlock()
753 757 if not lock:
754 758 lock = self.lock()
755 759 tr = self.transaction()
756 760
757 761 # check in files
758 762 new = {}
759 763 linkrev = self.changelog.count()
760 764 commit.sort()
761 765 is_exec = util.execfunc(self.root, m1.execf)
762 766 is_link = util.linkfunc(self.root, m1.linkf)
763 767 for f in commit:
764 768 self.ui.note(f + "\n")
765 769 try:
766 770 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
767 771 m1.set(f, is_exec(f), is_link(f))
768 772 except (OSError, IOError):
769 773 if use_dirstate:
770 774 self.ui.warn(_("trouble committing %s!\n") % f)
771 775 raise
772 776 else:
773 777 remove.append(f)
774 778
775 779 # update manifest
776 780 m1.update(new)
777 781 remove.sort()
778 782 removed = []
779 783
780 784 for f in remove:
781 785 if f in m1:
782 786 del m1[f]
783 787 removed.append(f)
784 788 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
785 789
786 790 # add changeset
787 791 new = new.keys()
788 792 new.sort()
789 793
790 794 user = user or self.ui.username()
791 795 if not text or force_editor:
792 796 edittext = []
793 797 if text:
794 798 edittext.append(text)
795 799 edittext.append("")
796 800 edittext.append("HG: user: %s" % user)
797 801 if p2 != nullid:
798 802 edittext.append("HG: branch merge")
799 803 if branchname:
800 804 edittext.append("HG: branch %s" % util.tolocal(branchname))
801 805 edittext.extend(["HG: changed %s" % f for f in changed])
802 806 edittext.extend(["HG: removed %s" % f for f in removed])
803 807 if not changed and not remove:
804 808 edittext.append("HG: no files changed")
805 809 edittext.append("")
806 810 # run editor in the repository root
807 811 olddir = os.getcwd()
808 812 os.chdir(self.root)
809 813 text = self.ui.edit("\n".join(edittext), user)
810 814 os.chdir(olddir)
811 815
812 816 lines = [line.rstrip() for line in text.rstrip().splitlines()]
813 817 while lines and not lines[0]:
814 818 del lines[0]
815 819 if not lines:
816 820 return None
817 821 text = '\n'.join(lines)
818 822 if branchname:
819 823 extra["branch"] = branchname
820 824 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
821 825 user, date, extra)
822 826 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
823 827 parent2=xp2)
824 828 tr.close()
825 829
826 830 if self.branchcache and "branch" in extra:
827 831 self.branchcache[util.tolocal(extra["branch"])] = n
828 832
829 833 if use_dirstate or update_dirstate:
830 834 self.dirstate.setparents(n)
831 835 if use_dirstate:
832 836 self.dirstate.update(new, "n")
833 837 self.dirstate.forget(removed)
834 838
835 839 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
836 840 return n
837 841
838 842 def walk(self, node=None, files=[], match=util.always, badmatch=None):
839 843 '''
840 844 walk recursively through the directory tree or a given
841 845 changeset, finding all files matched by the match
842 846 function
843 847
844 848 results are yielded in a tuple (src, filename), where src
845 849 is one of:
846 850 'f' the file was found in the directory tree
847 851 'm' the file was only in the dirstate and not in the tree
848 852 'b' file was not found and matched badmatch
849 853 '''
850 854
851 855 if node:
852 856 fdict = dict.fromkeys(files)
853 857 for fn in self.manifest.read(self.changelog.read(node)[0]):
854 858 for ffn in fdict:
855 859 # match if the file is the exact name or a directory
856 860 if ffn == fn or fn.startswith("%s/" % ffn):
857 861 del fdict[ffn]
858 862 break
859 863 if match(fn):
860 864 yield 'm', fn
861 865 for fn in fdict:
862 866 if badmatch and badmatch(fn):
863 867 if match(fn):
864 868 yield 'b', fn
865 869 else:
866 870 self.ui.warn(_('%s: No such file in rev %s\n') % (
867 871 util.pathto(self.getcwd(), fn), short(node)))
868 872 else:
869 873 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
870 874 yield src, fn
871 875
872 876 def status(self, node1=None, node2=None, files=[], match=util.always,
873 877 wlock=None, list_ignored=False, list_clean=False):
874 878 """return status of files between two nodes or node and working directory
875 879
876 880 If node1 is None, use the first dirstate parent instead.
877 881 If node2 is None, compare node1 with working directory.
878 882 """
879 883
880 884 def fcmp(fn, mf):
881 885 t1 = self.wread(fn)
882 886 return self.file(fn).cmp(mf.get(fn, nullid), t1)
883 887
884 888 def mfmatches(node):
885 889 change = self.changelog.read(node)
886 890 mf = self.manifest.read(change[0]).copy()
887 891 for fn in mf.keys():
888 892 if not match(fn):
889 893 del mf[fn]
890 894 return mf
891 895
892 896 modified, added, removed, deleted, unknown = [], [], [], [], []
893 897 ignored, clean = [], []
894 898
895 899 compareworking = False
896 900 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
897 901 compareworking = True
898 902
899 903 if not compareworking:
900 904 # read the manifest from node1 before the manifest from node2,
901 905 # so that we'll hit the manifest cache if we're going through
902 906 # all the revisions in parent->child order.
903 907 mf1 = mfmatches(node1)
904 908
905 909 # are we comparing the working directory?
906 910 if not node2:
907 911 if not wlock:
908 912 try:
909 913 wlock = self.wlock(wait=0)
910 914 except lock.LockException:
911 915 wlock = None
912 916 (lookup, modified, added, removed, deleted, unknown,
913 917 ignored, clean) = self.dirstate.status(files, match,
914 918 list_ignored, list_clean)
915 919
916 920 # are we comparing working dir against its parent?
917 921 if compareworking:
918 922 if lookup:
919 923 # do a full compare of any files that might have changed
920 924 mf2 = mfmatches(self.dirstate.parents()[0])
921 925 for f in lookup:
922 926 if fcmp(f, mf2):
923 927 modified.append(f)
924 928 else:
925 929 clean.append(f)
926 930 if wlock is not None:
927 931 self.dirstate.update([f], "n")
928 932 else:
929 933 # we are comparing working dir against non-parent
930 934 # generate a pseudo-manifest for the working dir
931 935 # XXX: create it in dirstate.py ?
932 936 mf2 = mfmatches(self.dirstate.parents()[0])
933 937 is_exec = util.execfunc(self.root, mf2.execf)
934 938 is_link = util.linkfunc(self.root, mf2.linkf)
935 939 for f in lookup + modified + added:
936 940 mf2[f] = ""
937 941 mf2.set(f, is_exec(f), is_link(f))
938 942 for f in removed:
939 943 if f in mf2:
940 944 del mf2[f]
941 945 else:
942 946 # we are comparing two revisions
943 947 mf2 = mfmatches(node2)
944 948
945 949 if not compareworking:
946 950 # flush lists from dirstate before comparing manifests
947 951 modified, added, clean = [], [], []
948 952
949 953 # make sure to sort the files so we talk to the disk in a
950 954 # reasonable order
951 955 mf2keys = mf2.keys()
952 956 mf2keys.sort()
953 957 for fn in mf2keys:
954 958 if mf1.has_key(fn):
955 959 if mf1.flags(fn) != mf2.flags(fn) or \
956 960 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
957 961 modified.append(fn)
958 962 elif list_clean:
959 963 clean.append(fn)
960 964 del mf1[fn]
961 965 else:
962 966 added.append(fn)
963 967
964 968 removed = mf1.keys()
965 969
966 970 # sort and return results:
967 971 for l in modified, added, removed, deleted, unknown, ignored, clean:
968 972 l.sort()
969 973 return (modified, added, removed, deleted, unknown, ignored, clean)
970 974
971 975 def add(self, list, wlock=None):
972 976 if not wlock:
973 977 wlock = self.wlock()
974 978 for f in list:
975 979 p = self.wjoin(f)
976 980 islink = os.path.islink(p)
977 981 if not islink and not os.path.exists(p):
978 982 self.ui.warn(_("%s does not exist!\n") % f)
979 983 elif not islink and not os.path.isfile(p):
980 984 self.ui.warn(_("%s not added: only files and symlinks "
981 985 "supported currently\n") % f)
982 986 elif self.dirstate.state(f) in 'an':
983 987 self.ui.warn(_("%s already tracked!\n") % f)
984 988 else:
985 989 self.dirstate.update([f], "a")
986 990
987 991 def forget(self, list, wlock=None):
988 992 if not wlock:
989 993 wlock = self.wlock()
990 994 for f in list:
991 995 if self.dirstate.state(f) not in 'ai':
992 996 self.ui.warn(_("%s not added!\n") % f)
993 997 else:
994 998 self.dirstate.forget([f])
995 999
996 1000 def remove(self, list, unlink=False, wlock=None):
997 1001 if unlink:
998 1002 for f in list:
999 1003 try:
1000 1004 util.unlink(self.wjoin(f))
1001 1005 except OSError, inst:
1002 1006 if inst.errno != errno.ENOENT:
1003 1007 raise
1004 1008 if not wlock:
1005 1009 wlock = self.wlock()
1006 1010 for f in list:
1007 1011 p = self.wjoin(f)
1008 1012 if os.path.exists(p):
1009 1013 self.ui.warn(_("%s still exists!\n") % f)
1010 1014 elif self.dirstate.state(f) == 'a':
1011 1015 self.dirstate.forget([f])
1012 1016 elif f not in self.dirstate:
1013 1017 self.ui.warn(_("%s not tracked!\n") % f)
1014 1018 else:
1015 1019 self.dirstate.update([f], "r")
1016 1020
1017 1021 def undelete(self, list, wlock=None):
1018 1022 p = self.dirstate.parents()[0]
1019 1023 mn = self.changelog.read(p)[0]
1020 1024 m = self.manifest.read(mn)
1021 1025 if not wlock:
1022 1026 wlock = self.wlock()
1023 1027 for f in list:
1024 1028 if self.dirstate.state(f) not in "r":
1025 1029 self.ui.warn("%s not removed!\n" % f)
1026 1030 else:
1027 1031 t = self.file(f).read(m[f])
1028 1032 self.wwrite(f, t, m.flags(f))
1029 1033 self.dirstate.update([f], "n")
1030 1034
1031 1035 def copy(self, source, dest, wlock=None):
1032 1036 p = self.wjoin(dest)
1033 1037 if not os.path.exists(p):
1034 1038 self.ui.warn(_("%s does not exist!\n") % dest)
1035 1039 elif not os.path.isfile(p):
1036 1040 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1037 1041 else:
1038 1042 if not wlock:
1039 1043 wlock = self.wlock()
1040 1044 if self.dirstate.state(dest) == '?':
1041 1045 self.dirstate.update([dest], "a")
1042 1046 self.dirstate.copy(source, dest)
1043 1047
1044 1048 def heads(self, start=None):
1045 1049 heads = self.changelog.heads(start)
1046 1050 # sort the output in rev descending order
1047 1051 heads = [(-self.changelog.rev(h), h) for h in heads]
1048 1052 heads.sort()
1049 1053 return [n for (r, n) in heads]
1050 1054
1051 1055 def branches(self, nodes):
1052 1056 if not nodes:
1053 1057 nodes = [self.changelog.tip()]
1054 1058 b = []
1055 1059 for n in nodes:
1056 1060 t = n
1057 1061 while 1:
1058 1062 p = self.changelog.parents(n)
1059 1063 if p[1] != nullid or p[0] == nullid:
1060 1064 b.append((t, n, p[0], p[1]))
1061 1065 break
1062 1066 n = p[0]
1063 1067 return b
1064 1068
1065 1069 def between(self, pairs):
1066 1070 r = []
1067 1071
1068 1072 for top, bottom in pairs:
1069 1073 n, l, i = top, [], 0
1070 1074 f = 1
1071 1075
1072 1076 while n != bottom:
1073 1077 p = self.changelog.parents(n)[0]
1074 1078 if i == f:
1075 1079 l.append(n)
1076 1080 f = f * 2
1077 1081 n = p
1078 1082 i += 1
1079 1083
1080 1084 r.append(l)
1081 1085
1082 1086 return r
1083 1087
1084 1088 def findincoming(self, remote, base=None, heads=None, force=False):
1085 1089 """Return list of roots of the subsets of missing nodes from remote
1086 1090
1087 1091 If base dict is specified, assume that these nodes and their parents
1088 1092 exist on the remote side and that no child of a node of base exists
1089 1093 in both remote and self.
1090 1094 Furthermore base will be updated to include the nodes that exists
1091 1095 in self and remote but no children exists in self and remote.
1092 1096 If a list of heads is specified, return only nodes which are heads
1093 1097 or ancestors of these heads.
1094 1098
1095 1099 All the ancestors of base are in self and in remote.
1096 1100 All the descendants of the list returned are missing in self.
1097 1101 (and so we know that the rest of the nodes are missing in remote, see
1098 1102 outgoing)
1099 1103 """
1100 1104 m = self.changelog.nodemap
1101 1105 search = []
1102 1106 fetch = {}
1103 1107 seen = {}
1104 1108 seenbranch = {}
1105 1109 if base == None:
1106 1110 base = {}
1107 1111
1108 1112 if not heads:
1109 1113 heads = remote.heads()
1110 1114
1111 1115 if self.changelog.tip() == nullid:
1112 1116 base[nullid] = 1
1113 1117 if heads != [nullid]:
1114 1118 return [nullid]
1115 1119 return []
1116 1120
1117 1121 # assume we're closer to the tip than the root
1118 1122 # and start by examining the heads
1119 1123 self.ui.status(_("searching for changes\n"))
1120 1124
1121 1125 unknown = []
1122 1126 for h in heads:
1123 1127 if h not in m:
1124 1128 unknown.append(h)
1125 1129 else:
1126 1130 base[h] = 1
1127 1131
1128 1132 if not unknown:
1129 1133 return []
1130 1134
1131 1135 req = dict.fromkeys(unknown)
1132 1136 reqcnt = 0
1133 1137
1134 1138 # search through remote branches
1135 1139 # a 'branch' here is a linear segment of history, with four parts:
1136 1140 # head, root, first parent, second parent
1137 1141 # (a branch always has two parents (or none) by definition)
1138 1142 unknown = remote.branches(unknown)
1139 1143 while unknown:
1140 1144 r = []
1141 1145 while unknown:
1142 1146 n = unknown.pop(0)
1143 1147 if n[0] in seen:
1144 1148 continue
1145 1149
1146 1150 self.ui.debug(_("examining %s:%s\n")
1147 1151 % (short(n[0]), short(n[1])))
1148 1152 if n[0] == nullid: # found the end of the branch
1149 1153 pass
1150 1154 elif n in seenbranch:
1151 1155 self.ui.debug(_("branch already found\n"))
1152 1156 continue
1153 1157 elif n[1] and n[1] in m: # do we know the base?
1154 1158 self.ui.debug(_("found incomplete branch %s:%s\n")
1155 1159 % (short(n[0]), short(n[1])))
1156 1160 search.append(n) # schedule branch range for scanning
1157 1161 seenbranch[n] = 1
1158 1162 else:
1159 1163 if n[1] not in seen and n[1] not in fetch:
1160 1164 if n[2] in m and n[3] in m:
1161 1165 self.ui.debug(_("found new changeset %s\n") %
1162 1166 short(n[1]))
1163 1167 fetch[n[1]] = 1 # earliest unknown
1164 1168 for p in n[2:4]:
1165 1169 if p in m:
1166 1170 base[p] = 1 # latest known
1167 1171
1168 1172 for p in n[2:4]:
1169 1173 if p not in req and p not in m:
1170 1174 r.append(p)
1171 1175 req[p] = 1
1172 1176 seen[n[0]] = 1
1173 1177
1174 1178 if r:
1175 1179 reqcnt += 1
1176 1180 self.ui.debug(_("request %d: %s\n") %
1177 1181 (reqcnt, " ".join(map(short, r))))
1178 1182 for p in xrange(0, len(r), 10):
1179 1183 for b in remote.branches(r[p:p+10]):
1180 1184 self.ui.debug(_("received %s:%s\n") %
1181 1185 (short(b[0]), short(b[1])))
1182 1186 unknown.append(b)
1183 1187
1184 1188 # do binary search on the branches we found
1185 1189 while search:
1186 1190 n = search.pop(0)
1187 1191 reqcnt += 1
1188 1192 l = remote.between([(n[0], n[1])])[0]
1189 1193 l.append(n[1])
1190 1194 p = n[0]
1191 1195 f = 1
1192 1196 for i in l:
1193 1197 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1194 1198 if i in m:
1195 1199 if f <= 2:
1196 1200 self.ui.debug(_("found new branch changeset %s\n") %
1197 1201 short(p))
1198 1202 fetch[p] = 1
1199 1203 base[i] = 1
1200 1204 else:
1201 1205 self.ui.debug(_("narrowed branch search to %s:%s\n")
1202 1206 % (short(p), short(i)))
1203 1207 search.append((p, i))
1204 1208 break
1205 1209 p, f = i, f * 2
1206 1210
1207 1211 # sanity check our fetch list
1208 1212 for f in fetch.keys():
1209 1213 if f in m:
1210 1214 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1211 1215
1212 1216 if base.keys() == [nullid]:
1213 1217 if force:
1214 1218 self.ui.warn(_("warning: repository is unrelated\n"))
1215 1219 else:
1216 1220 raise util.Abort(_("repository is unrelated"))
1217 1221
1218 1222 self.ui.debug(_("found new changesets starting at ") +
1219 1223 " ".join([short(f) for f in fetch]) + "\n")
1220 1224
1221 1225 self.ui.debug(_("%d total queries\n") % reqcnt)
1222 1226
1223 1227 return fetch.keys()
1224 1228
1225 1229 def findoutgoing(self, remote, base=None, heads=None, force=False):
1226 1230 """Return list of nodes that are roots of subsets not in remote
1227 1231
1228 1232 If base dict is specified, assume that these nodes and their parents
1229 1233 exist on the remote side.
1230 1234 If a list of heads is specified, return only nodes which are heads
1231 1235 or ancestors of these heads, and return a second element which
1232 1236 contains all remote heads which get new children.
1233 1237 """
1234 1238 if base == None:
1235 1239 base = {}
1236 1240 self.findincoming(remote, base, heads, force=force)
1237 1241
1238 1242 self.ui.debug(_("common changesets up to ")
1239 1243 + " ".join(map(short, base.keys())) + "\n")
1240 1244
1241 1245 remain = dict.fromkeys(self.changelog.nodemap)
1242 1246
1243 1247 # prune everything remote has from the tree
1244 1248 del remain[nullid]
1245 1249 remove = base.keys()
1246 1250 while remove:
1247 1251 n = remove.pop(0)
1248 1252 if n in remain:
1249 1253 del remain[n]
1250 1254 for p in self.changelog.parents(n):
1251 1255 remove.append(p)
1252 1256
1253 1257 # find every node whose parents have been pruned
1254 1258 subset = []
1255 1259 # find every remote head that will get new children
1256 1260 updated_heads = {}
1257 1261 for n in remain:
1258 1262 p1, p2 = self.changelog.parents(n)
1259 1263 if p1 not in remain and p2 not in remain:
1260 1264 subset.append(n)
1261 1265 if heads:
1262 1266 if p1 in heads:
1263 1267 updated_heads[p1] = True
1264 1268 if p2 in heads:
1265 1269 updated_heads[p2] = True
1266 1270
1267 1271 # this is the set of all roots we have to push
1268 1272 if heads:
1269 1273 return subset, updated_heads.keys()
1270 1274 else:
1271 1275 return subset
1272 1276
1273 1277 def pull(self, remote, heads=None, force=False, lock=None):
1274 1278 mylock = False
1275 1279 if not lock:
1276 1280 lock = self.lock()
1277 1281 mylock = True
1278 1282
1279 1283 try:
1280 1284 fetch = self.findincoming(remote, force=force)
1281 1285 if fetch == [nullid]:
1282 1286 self.ui.status(_("requesting all changes\n"))
1283 1287
1284 1288 if not fetch:
1285 1289 self.ui.status(_("no changes found\n"))
1286 1290 return 0
1287 1291
1288 1292 if heads is None:
1289 1293 cg = remote.changegroup(fetch, 'pull')
1290 1294 else:
1291 1295 if 'changegroupsubset' not in remote.capabilities:
1292 1296 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1293 1297 cg = remote.changegroupsubset(fetch, heads, 'pull')
1294 1298 return self.addchangegroup(cg, 'pull', remote.url())
1295 1299 finally:
1296 1300 if mylock:
1297 1301 lock.release()
1298 1302
1299 1303 def push(self, remote, force=False, revs=None):
1300 1304 # there are two ways to push to remote repo:
1301 1305 #
1302 1306 # addchangegroup assumes local user can lock remote
1303 1307 # repo (local filesystem, old ssh servers).
1304 1308 #
1305 1309 # unbundle assumes local user cannot lock remote repo (new ssh
1306 1310 # servers, http servers).
1307 1311
1308 1312 if remote.capable('unbundle'):
1309 1313 return self.push_unbundle(remote, force, revs)
1310 1314 return self.push_addchangegroup(remote, force, revs)
1311 1315
1312 1316 def prepush(self, remote, force, revs):
1313 1317 base = {}
1314 1318 remote_heads = remote.heads()
1315 1319 inc = self.findincoming(remote, base, remote_heads, force=force)
1316 1320
1317 1321 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1318 1322 if revs is not None:
1319 1323 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1320 1324 else:
1321 1325 bases, heads = update, self.changelog.heads()
1322 1326
1323 1327 if not bases:
1324 1328 self.ui.status(_("no changes found\n"))
1325 1329 return None, 1
1326 1330 elif not force:
1327 1331 # check if we're creating new remote heads
1328 1332 # to be a remote head after push, node must be either
1329 1333 # - unknown locally
1330 1334 # - a local outgoing head descended from update
1331 1335 # - a remote head that's known locally and not
1332 1336 # ancestral to an outgoing head
1333 1337
1334 1338 warn = 0
1335 1339
1336 1340 if remote_heads == [nullid]:
1337 1341 warn = 0
1338 1342 elif not revs and len(heads) > len(remote_heads):
1339 1343 warn = 1
1340 1344 else:
1341 1345 newheads = list(heads)
1342 1346 for r in remote_heads:
1343 1347 if r in self.changelog.nodemap:
1344 1348 desc = self.changelog.heads(r, heads)
1345 1349 l = [h for h in heads if h in desc]
1346 1350 if not l:
1347 1351 newheads.append(r)
1348 1352 else:
1349 1353 newheads.append(r)
1350 1354 if len(newheads) > len(remote_heads):
1351 1355 warn = 1
1352 1356
1353 1357 if warn:
1354 1358 self.ui.warn(_("abort: push creates new remote branches!\n"))
1355 1359 self.ui.status(_("(did you forget to merge?"
1356 1360 " use push -f to force)\n"))
1357 1361 return None, 1
1358 1362 elif inc:
1359 1363 self.ui.warn(_("note: unsynced remote changes!\n"))
1360 1364
1361 1365
1362 1366 if revs is None:
1363 1367 cg = self.changegroup(update, 'push')
1364 1368 else:
1365 1369 cg = self.changegroupsubset(update, revs, 'push')
1366 1370 return cg, remote_heads
1367 1371
1368 1372 def push_addchangegroup(self, remote, force, revs):
1369 1373 lock = remote.lock()
1370 1374
1371 1375 ret = self.prepush(remote, force, revs)
1372 1376 if ret[0] is not None:
1373 1377 cg, remote_heads = ret
1374 1378 return remote.addchangegroup(cg, 'push', self.url())
1375 1379 return ret[1]
1376 1380
1377 1381 def push_unbundle(self, remote, force, revs):
1378 1382 # local repo finds heads on server, finds out what revs it
1379 1383 # must push. once revs transferred, if server finds it has
1380 1384 # different heads (someone else won commit/push race), server
1381 1385 # aborts.
1382 1386
1383 1387 ret = self.prepush(remote, force, revs)
1384 1388 if ret[0] is not None:
1385 1389 cg, remote_heads = ret
1386 1390 if force: remote_heads = ['force']
1387 1391 return remote.unbundle(cg, remote_heads, 'push')
1388 1392 return ret[1]
1389 1393
1390 1394 def changegroupinfo(self, nodes):
1391 1395 self.ui.note(_("%d changesets found\n") % len(nodes))
1392 1396 if self.ui.debugflag:
1393 1397 self.ui.debug(_("List of changesets:\n"))
1394 1398 for node in nodes:
1395 1399 self.ui.debug("%s\n" % hex(node))
1396 1400
1397 1401 def changegroupsubset(self, bases, heads, source):
1398 1402 """This function generates a changegroup consisting of all the nodes
1399 1403 that are descendents of any of the bases, and ancestors of any of
1400 1404 the heads.
1401 1405
1402 1406 It is fairly complex as determining which filenodes and which
1403 1407 manifest nodes need to be included for the changeset to be complete
1404 1408 is non-trivial.
1405 1409
1406 1410 Another wrinkle is doing the reverse, figuring out which changeset in
1407 1411 the changegroup a particular filenode or manifestnode belongs to."""
1408 1412
1409 1413 self.hook('preoutgoing', throw=True, source=source)
1410 1414
1411 1415 # Set up some initial variables
1412 1416 # Make it easy to refer to self.changelog
1413 1417 cl = self.changelog
1414 1418 # msng is short for missing - compute the list of changesets in this
1415 1419 # changegroup.
1416 1420 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1417 1421 self.changegroupinfo(msng_cl_lst)
1418 1422 # Some bases may turn out to be superfluous, and some heads may be
1419 1423 # too. nodesbetween will return the minimal set of bases and heads
1420 1424 # necessary to re-create the changegroup.
1421 1425
1422 1426 # Known heads are the list of heads that it is assumed the recipient
1423 1427 # of this changegroup will know about.
1424 1428 knownheads = {}
1425 1429 # We assume that all parents of bases are known heads.
1426 1430 for n in bases:
1427 1431 for p in cl.parents(n):
1428 1432 if p != nullid:
1429 1433 knownheads[p] = 1
1430 1434 knownheads = knownheads.keys()
1431 1435 if knownheads:
1432 1436 # Now that we know what heads are known, we can compute which
1433 1437 # changesets are known. The recipient must know about all
1434 1438 # changesets required to reach the known heads from the null
1435 1439 # changeset.
1436 1440 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1437 1441 junk = None
1438 1442 # Transform the list into an ersatz set.
1439 1443 has_cl_set = dict.fromkeys(has_cl_set)
1440 1444 else:
1441 1445 # If there were no known heads, the recipient cannot be assumed to
1442 1446 # know about any changesets.
1443 1447 has_cl_set = {}
1444 1448
1445 1449 # Make it easy to refer to self.manifest
1446 1450 mnfst = self.manifest
1447 1451 # We don't know which manifests are missing yet
1448 1452 msng_mnfst_set = {}
1449 1453 # Nor do we know which filenodes are missing.
1450 1454 msng_filenode_set = {}
1451 1455
1452 1456 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1453 1457 junk = None
1454 1458
1455 1459 # A changeset always belongs to itself, so the changenode lookup
1456 1460 # function for a changenode is identity.
1457 1461 def identity(x):
1458 1462 return x
1459 1463
1460 1464 # A function generating function. Sets up an environment for the
1461 1465 # inner function.
1462 1466 def cmp_by_rev_func(revlog):
1463 1467 # Compare two nodes by their revision number in the environment's
1464 1468 # revision history. Since the revision number both represents the
1465 1469 # most efficient order to read the nodes in, and represents a
1466 1470 # topological sorting of the nodes, this function is often useful.
1467 1471 def cmp_by_rev(a, b):
1468 1472 return cmp(revlog.rev(a), revlog.rev(b))
1469 1473 return cmp_by_rev
1470 1474
1471 1475 # If we determine that a particular file or manifest node must be a
1472 1476 # node that the recipient of the changegroup will already have, we can
1473 1477 # also assume the recipient will have all the parents. This function
1474 1478 # prunes them from the set of missing nodes.
1475 1479 def prune_parents(revlog, hasset, msngset):
1476 1480 haslst = hasset.keys()
1477 1481 haslst.sort(cmp_by_rev_func(revlog))
1478 1482 for node in haslst:
1479 1483 parentlst = [p for p in revlog.parents(node) if p != nullid]
1480 1484 while parentlst:
1481 1485 n = parentlst.pop()
1482 1486 if n not in hasset:
1483 1487 hasset[n] = 1
1484 1488 p = [p for p in revlog.parents(n) if p != nullid]
1485 1489 parentlst.extend(p)
1486 1490 for n in hasset:
1487 1491 msngset.pop(n, None)
1488 1492
1489 1493 # This is a function generating function used to set up an environment
1490 1494 # for the inner function to execute in.
1491 1495 def manifest_and_file_collector(changedfileset):
1492 1496 # This is an information gathering function that gathers
1493 1497 # information from each changeset node that goes out as part of
1494 1498 # the changegroup. The information gathered is a list of which
1495 1499 # manifest nodes are potentially required (the recipient may
1496 1500 # already have them) and total list of all files which were
1497 1501 # changed in any changeset in the changegroup.
1498 1502 #
1499 1503 # We also remember the first changenode we saw any manifest
1500 1504 # referenced by so we can later determine which changenode 'owns'
1501 1505 # the manifest.
1502 1506 def collect_manifests_and_files(clnode):
1503 1507 c = cl.read(clnode)
1504 1508 for f in c[3]:
1505 1509 # This is to make sure we only have one instance of each
1506 1510 # filename string for each filename.
1507 1511 changedfileset.setdefault(f, f)
1508 1512 msng_mnfst_set.setdefault(c[0], clnode)
1509 1513 return collect_manifests_and_files
1510 1514
1511 1515 # Figure out which manifest nodes (of the ones we think might be part
1512 1516 # of the changegroup) the recipient must know about and remove them
1513 1517 # from the changegroup.
1514 1518 def prune_manifests():
1515 1519 has_mnfst_set = {}
1516 1520 for n in msng_mnfst_set:
1517 1521 # If a 'missing' manifest thinks it belongs to a changenode
1518 1522 # the recipient is assumed to have, obviously the recipient
1519 1523 # must have that manifest.
1520 1524 linknode = cl.node(mnfst.linkrev(n))
1521 1525 if linknode in has_cl_set:
1522 1526 has_mnfst_set[n] = 1
1523 1527 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1524 1528
1525 1529 # Use the information collected in collect_manifests_and_files to say
1526 1530 # which changenode any manifestnode belongs to.
1527 1531 def lookup_manifest_link(mnfstnode):
1528 1532 return msng_mnfst_set[mnfstnode]
1529 1533
1530 1534 # A function generating function that sets up the initial environment
1531 1535 # the inner function.
1532 1536 def filenode_collector(changedfiles):
1533 1537 next_rev = [0]
1534 1538 # This gathers information from each manifestnode included in the
1535 1539 # changegroup about which filenodes the manifest node references
1536 1540 # so we can include those in the changegroup too.
1537 1541 #
1538 1542 # It also remembers which changenode each filenode belongs to. It
1539 1543 # does this by assuming the a filenode belongs to the changenode
1540 1544 # the first manifest that references it belongs to.
1541 1545 def collect_msng_filenodes(mnfstnode):
1542 1546 r = mnfst.rev(mnfstnode)
1543 1547 if r == next_rev[0]:
1544 1548 # If the last rev we looked at was the one just previous,
1545 1549 # we only need to see a diff.
1546 1550 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1547 1551 # For each line in the delta
1548 1552 for dline in delta.splitlines():
1549 1553 # get the filename and filenode for that line
1550 1554 f, fnode = dline.split('\0')
1551 1555 fnode = bin(fnode[:40])
1552 1556 f = changedfiles.get(f, None)
1553 1557 # And if the file is in the list of files we care
1554 1558 # about.
1555 1559 if f is not None:
1556 1560 # Get the changenode this manifest belongs to
1557 1561 clnode = msng_mnfst_set[mnfstnode]
1558 1562 # Create the set of filenodes for the file if
1559 1563 # there isn't one already.
1560 1564 ndset = msng_filenode_set.setdefault(f, {})
1561 1565 # And set the filenode's changelog node to the
1562 1566 # manifest's if it hasn't been set already.
1563 1567 ndset.setdefault(fnode, clnode)
1564 1568 else:
1565 1569 # Otherwise we need a full manifest.
1566 1570 m = mnfst.read(mnfstnode)
1567 1571 # For every file in we care about.
1568 1572 for f in changedfiles:
1569 1573 fnode = m.get(f, None)
1570 1574 # If it's in the manifest
1571 1575 if fnode is not None:
1572 1576 # See comments above.
1573 1577 clnode = msng_mnfst_set[mnfstnode]
1574 1578 ndset = msng_filenode_set.setdefault(f, {})
1575 1579 ndset.setdefault(fnode, clnode)
1576 1580 # Remember the revision we hope to see next.
1577 1581 next_rev[0] = r + 1
1578 1582 return collect_msng_filenodes
1579 1583
1580 1584 # We have a list of filenodes we think we need for a file, lets remove
1581 1585 # all those we now the recipient must have.
1582 1586 def prune_filenodes(f, filerevlog):
1583 1587 msngset = msng_filenode_set[f]
1584 1588 hasset = {}
1585 1589 # If a 'missing' filenode thinks it belongs to a changenode we
1586 1590 # assume the recipient must have, then the recipient must have
1587 1591 # that filenode.
1588 1592 for n in msngset:
1589 1593 clnode = cl.node(filerevlog.linkrev(n))
1590 1594 if clnode in has_cl_set:
1591 1595 hasset[n] = 1
1592 1596 prune_parents(filerevlog, hasset, msngset)
1593 1597
1594 1598 # A function-generating function that sets up a context for the
1595 1599 # inner function.
1596 1600 def lookup_filenode_link_func(fname):
1597 1601 msngset = msng_filenode_set[fname]
1598 1602 # Lookup the changenode the filenode belongs to.
1599 1603 def lookup_filenode_link(fnode):
1600 1604 return msngset[fnode]
1601 1605 return lookup_filenode_link
1602 1606
1603 1607 # Now that we have all these utility functions to help out and
1604 1608 # logically divide up the task, generate the group.
1605 1609 def gengroup():
1606 1610 # The set of changed files starts empty.
1607 1611 changedfiles = {}
1608 1612 # Create a changenode group generator that will call our functions
1609 1613 # back to lookup the owning changenode and collect information.
1610 1614 group = cl.group(msng_cl_lst, identity,
1611 1615 manifest_and_file_collector(changedfiles))
1612 1616 for chnk in group:
1613 1617 yield chnk
1614 1618
1615 1619 # The list of manifests has been collected by the generator
1616 1620 # calling our functions back.
1617 1621 prune_manifests()
1618 1622 msng_mnfst_lst = msng_mnfst_set.keys()
1619 1623 # Sort the manifestnodes by revision number.
1620 1624 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1621 1625 # Create a generator for the manifestnodes that calls our lookup
1622 1626 # and data collection functions back.
1623 1627 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1624 1628 filenode_collector(changedfiles))
1625 1629 for chnk in group:
1626 1630 yield chnk
1627 1631
1628 1632 # These are no longer needed, dereference and toss the memory for
1629 1633 # them.
1630 1634 msng_mnfst_lst = None
1631 1635 msng_mnfst_set.clear()
1632 1636
1633 1637 changedfiles = changedfiles.keys()
1634 1638 changedfiles.sort()
1635 1639 # Go through all our files in order sorted by name.
1636 1640 for fname in changedfiles:
1637 1641 filerevlog = self.file(fname)
1638 1642 # Toss out the filenodes that the recipient isn't really
1639 1643 # missing.
1640 1644 if msng_filenode_set.has_key(fname):
1641 1645 prune_filenodes(fname, filerevlog)
1642 1646 msng_filenode_lst = msng_filenode_set[fname].keys()
1643 1647 else:
1644 1648 msng_filenode_lst = []
1645 1649 # If any filenodes are left, generate the group for them,
1646 1650 # otherwise don't bother.
1647 1651 if len(msng_filenode_lst) > 0:
1648 1652 yield changegroup.genchunk(fname)
1649 1653 # Sort the filenodes by their revision number.
1650 1654 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1651 1655 # Create a group generator and only pass in a changenode
1652 1656 # lookup function as we need to collect no information
1653 1657 # from filenodes.
1654 1658 group = filerevlog.group(msng_filenode_lst,
1655 1659 lookup_filenode_link_func(fname))
1656 1660 for chnk in group:
1657 1661 yield chnk
1658 1662 if msng_filenode_set.has_key(fname):
1659 1663 # Don't need this anymore, toss it to free memory.
1660 1664 del msng_filenode_set[fname]
1661 1665 # Signal that no more groups are left.
1662 1666 yield changegroup.closechunk()
1663 1667
1664 1668 if msng_cl_lst:
1665 1669 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1666 1670
1667 1671 return util.chunkbuffer(gengroup())
1668 1672
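util.chunkbuffer wraps the generator in a file-like object, so the bundle is never materialized in full: callers pull bytes on demand and gengroup runs only as far as needed. A hedged sketch of a consumer; write is a stand-in for whatever transport is in use:

    def copygroup(cg, write, blocksize=4096):
        # cg is the file-like object returned above; read() drains the
        # underlying generator chunk by chunk.
        while True:
            data = cg.read(blocksize)
            if not data:
                break
            write(data)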
1669 1673 def changegroup(self, basenodes, source):
1670 1674 """Generate a changegroup of all nodes that we have that a recipient
1671 1675 doesn't.
1672 1676
1673 1677 This is much easier than the previous function as we can assume that
1674 1678 the recipient has any changenode we aren't sending them."""
1675 1679
1676 1680 self.hook('preoutgoing', throw=True, source=source)
1677 1681
1678 1682 cl = self.changelog
1679 1683 nodes = cl.nodesbetween(basenodes, None)[0]
1680 1684 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1681 1685 self.changegroupinfo(nodes)
1682 1686
1683 1687 def identity(x):
1684 1688 return x
1685 1689
1686 1690 def gennodelst(revlog):
1687 1691 for r in xrange(0, revlog.count()):
1688 1692 n = revlog.node(r)
1689 1693 if revlog.linkrev(n) in revset:
1690 1694 yield n
1691 1695
1692 1696 def changed_file_collector(changedfileset):
1693 1697 def collect_changed_files(clnode):
1694 1698 c = cl.read(clnode)
1695 1699 for fname in c[3]:
1696 1700 changedfileset[fname] = 1
1697 1701 return collect_changed_files
1698 1702
1699 1703 def lookuprevlink_func(revlog):
1700 1704 def lookuprevlink(n):
1701 1705 return cl.node(revlog.linkrev(n))
1702 1706 return lookuprevlink
1703 1707
1704 1708 def gengroup():
1705 1709 # construct a list of all changed files
1706 1710 changedfiles = {}
1707 1711
1708 1712 for chnk in cl.group(nodes, identity,
1709 1713 changed_file_collector(changedfiles)):
1710 1714 yield chnk
1711 1715 changedfiles = changedfiles.keys()
1712 1716 changedfiles.sort()
1713 1717
1714 1718 mnfst = self.manifest
1715 1719 nodeiter = gennodelst(mnfst)
1716 1720 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1717 1721 yield chnk
1718 1722
1719 1723 for fname in changedfiles:
1720 1724 filerevlog = self.file(fname)
1721 1725 nodeiter = gennodelst(filerevlog)
1722 1726 nodeiter = list(nodeiter)
1723 1727 if nodeiter:
1724 1728 yield changegroup.genchunk(fname)
1725 1729 lookup = lookuprevlink_func(filerevlog)
1726 1730 for chnk in filerevlog.group(nodeiter, lookup):
1727 1731 yield chnk
1728 1732
1729 1733 yield changegroup.closechunk()
1730 1734
1731 1735 if nodes:
1732 1736 self.hook('outgoing', node=hex(nodes[0]), source=source)
1733 1737
1734 1738 return util.chunkbuffer(gengroup())
1735 1739
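Both generators above rely on the same wire framing from changegroup.genchunk and changegroup.closechunk: every chunk carries a 4-byte big-endian length prefix, and a zero-length chunk terminates the current group. A sketch of that framing, assuming the convention that the prefix counts its own 4 bytes:

    import struct

    def genchunk_sketch(data):
        # the length prefix counts itself, so an empty payload encodes as 4
        return struct.pack(">l", len(data) + 4) + data

    def closechunk_sketch():
        # a zero length ends the current group (see closechunk above)
        return struct.pack(">l", 0)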
1736 1740 def addchangegroup(self, source, srctype, url):
1737 1741 """add changegroup to repo.
1738 1742
1739 1743 return values:
1740 1744 - nothing changed or no source: 0
1741 1745 - more heads than before: 1+added heads (2..n)
1742 1746 - fewer heads than before: -1-removed heads (-2..-n)
1743 1747 - number of heads stays the same: 1
1744 1748 """
1745 1749 def csmap(x):
1746 1750 self.ui.debug(_("add changeset %s\n") % short(x))
1747 1751 return cl.count()
1748 1752
1749 1753 def revmap(x):
1750 1754 return cl.rev(x)
1751 1755
1752 1756 if not source:
1753 1757 return 0
1754 1758
1755 1759 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1756 1760
1757 1761 changesets = files = revisions = 0
1758 1762
1759 1763 tr = self.transaction()
1760 1764
1761 1765 # write changelog data to temp files so concurrent readers will not see
1762 1766 # an inconsistent view
1763 1767 cl = None
1764 1768 try:
1765 1769 cl = appendfile.appendchangelog(self.sopener,
1766 1770 self.changelog.version)
1767 1771
1768 1772 oldheads = len(cl.heads())
1769 1773
1770 1774 # pull off the changeset group
1771 1775 self.ui.status(_("adding changesets\n"))
1772 1776 cor = cl.count() - 1
1773 1777 chunkiter = changegroup.chunkiter(source)
1774 1778 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1775 1779 raise util.Abort(_("received changelog group is empty"))
1776 1780 cnr = cl.count() - 1
1777 1781 changesets = cnr - cor
1778 1782
1779 1783 # pull off the manifest group
1780 1784 self.ui.status(_("adding manifests\n"))
1781 1785 chunkiter = changegroup.chunkiter(source)
1782 1786 # no need to check for empty manifest group here:
1783 1787 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1784 1788 # no new manifest will be created and the manifest group will
1785 1789 # be empty during the pull
1786 1790 self.manifest.addgroup(chunkiter, revmap, tr)
1787 1791
1788 1792 # process the files
1789 1793 self.ui.status(_("adding file changes\n"))
1790 1794 while 1:
1791 1795 f = changegroup.getchunk(source)
1792 1796 if not f:
1793 1797 break
1794 1798 self.ui.debug(_("adding %s revisions\n") % f)
1795 1799 fl = self.file(f)
1796 1800 o = fl.count()
1797 1801 chunkiter = changegroup.chunkiter(source)
1798 1802 if fl.addgroup(chunkiter, revmap, tr) is None:
1799 1803 raise util.Abort(_("received file revlog group is empty"))
1800 1804 revisions += fl.count() - o
1801 1805 files += 1
1802 1806
1803 1807 cl.writedata()
1804 1808 finally:
1805 1809 if cl:
1806 1810 cl.cleanup()
1807 1811
1808 1812 # make changelog see real files again
1809 1813 self.changelog = changelog.changelog(self.sopener,
1810 1814 self.changelog.version)
1811 1815 self.changelog.checkinlinesize(tr)
1812 1816
1813 1817 newheads = len(self.changelog.heads())
1814 1818 heads = ""
1815 1819 if oldheads and newheads != oldheads:
1816 1820 heads = _(" (%+d heads)") % (newheads - oldheads)
1817 1821
1818 1822 self.ui.status(_("added %d changesets"
1819 1823 " with %d changes to %d files%s\n")
1820 1824 % (changesets, revisions, files, heads))
1821 1825
1822 1826 if changesets > 0:
1823 1827 self.hook('pretxnchangegroup', throw=True,
1824 1828 node=hex(self.changelog.node(cor+1)), source=srctype,
1825 1829 url=url)
1826 1830
1827 1831 tr.close()
1828 1832
1829 1833 if changesets > 0:
1830 1834 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1831 1835 source=srctype, url=url)
1832 1836
1833 1837 for i in xrange(cor + 1, cnr + 1):
1834 1838 self.hook("incoming", node=hex(self.changelog.node(i)),
1835 1839 source=srctype, url=url)
1836 1840
1837 1841 # never return 0 here:
1838 1842 if newheads < oldheads:
1839 1843 return newheads - oldheads - 1
1840 1844 else:
1841 1845 return newheads - oldheads + 1
1842 1846
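The "never return 0" rule lets callers tell "nothing was pulled" (0) apart from "something was pulled but the head count is unchanged" (1). A hedged sketch of decoding the result at a hypothetical call site:

    ret = repo.addchangegroup(source, 'pull', url)  # illustrative call
    if ret == 0:
        msg = "no changes found"
    elif ret > 1:
        msg = "%d new heads" % (ret - 1)
    elif ret < 0:
        msg = "%d heads removed" % (-ret - 1)
    else:  # ret == 1
        msg = "head count unchanged"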
1843 1847
1844 1848 def stream_in(self, remote):
1845 1849 fp = remote.stream_out()
1846 1850 l = fp.readline()
1847 1851 try:
1848 1852 resp = int(l)
1849 1853 except ValueError:
1850 1854 raise util.UnexpectedOutput(
1851 1855 _('Unexpected response from remote server:'), l)
1852 1856 if resp == 1:
1853 1857 raise util.Abort(_('operation forbidden by server'))
1854 1858 elif resp == 2:
1855 1859 raise util.Abort(_('locking the remote repository failed'))
1856 1860 elif resp != 0:
1857 1861 raise util.Abort(_('the server sent an unknown error code'))
1858 1862 self.ui.status(_('streaming all changes\n'))
1859 1863 l = fp.readline()
1860 1864 try:
1861 1865 total_files, total_bytes = map(int, l.split(' ', 1))
1862 1866 except (ValueError, TypeError):
1863 1867 raise util.UnexpectedOutput(
1864 1868 _('Unexpected response from remote server:'), l)
1865 1869 self.ui.status(_('%d files to transfer, %s of data\n') %
1866 1870 (total_files, util.bytecount(total_bytes)))
1867 1871 start = time.time()
1868 1872 for i in xrange(total_files):
1869 1873 # XXX doesn't support '\n' or '\r' in filenames
1870 1874 l = fp.readline()
1871 1875 try:
1872 1876 name, size = l.split('\0', 1)
1873 1877 size = int(size)
1874 1878 except (ValueError, TypeError):
1875 1879 raise util.UnexpectedOutput(
1876 1880 _('Unexpected response from remote server:'), l)
1877 1881 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1878 1882 ofp = self.sopener(name, 'w')
1879 1883 for chunk in util.filechunkiter(fp, limit=size):
1880 1884 ofp.write(chunk)
1881 1885 ofp.close()
1882 1886 elapsed = time.time() - start
1883 1887 if elapsed <= 0:
1884 1888 elapsed = 0.001
1885 1889 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1886 1890 (util.bytecount(total_bytes), elapsed,
1887 1891 util.bytecount(total_bytes / elapsed)))
1888 1892 self.reload()
1889 1893 return len(self.heads()) + 1
1890 1894
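stream_in consumes a line-oriented header followed by raw data: a numeric status line (0 ok, 1 forbidden, 2 lock failed), a 'total_files total_bytes' line, then for each file a 'name\0size' line followed by exactly size bytes. The server side lives elsewhere, so this producer is a sketch only; entries is an assumed list of (name, data) pairs:

    def stream_out_sketch(entries):
        yield '0\n'                               # status: OK
        total = sum([len(d) for n, d in entries])
        yield '%d %d\n' % (len(entries), total)
        for name, data in entries:
            yield '%s\0%d\n' % (name, len(data))  # per-file header line
            yield data                            # followed by the raw bytes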
1891 1895 def clone(self, remote, heads=[], stream=False):
1892 1896 '''clone remote repository.
1893 1897
1894 1898 keyword arguments:
1895 1899 heads: list of revs to clone (forces use of pull)
1896 1900 stream: use streaming clone if possible'''
1897 1901
1898 1902 # now, all clients that can request uncompressed clones can
1899 1903 # read repo formats supported by all servers that can serve
1900 1904 # them.
1901 1905
1902 1906 # if revlog format changes, client will have to check version
1903 1907 # and format flags on "stream" capability, and use
1904 1908 # uncompressed only if compatible.
1905 1909
1906 1910 if stream and not heads and remote.capable('stream'):
1907 1911 return self.stream_in(remote)
1908 1912 return self.pull(remote, heads)
1909 1913
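A usage note: streaming is purely an optimization, and anything that narrows the clone forces the pull path. Illustrative call sites (somenode is a placeholder changenode):

    repo.clone(remote, stream=True)       # uncompressed copy if the server allows
    repo.clone(remote, heads=[somenode])  # partial clone: always goes via pull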
1910 1914 # used to avoid circular references so destructors work
1911 1915 def aftertrans(files):
1912 1916 renamefiles = [tuple(t) for t in files]
1913 1917 def a():
1914 1918 for src, dest in renamefiles:
1915 1919 util.rename(src, dest)
1916 1920 return a
1917 1921
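Because aftertrans closes over a plain list of name pairs rather than the repository object, the transaction's completion callback keeps no reference back to the repo and __del__-based cleanup can still run. A sketch of the intended effect; the journal/undo names are assumed from the usual convention:

    after = aftertrans([("journal", "undo")])
    # ... transaction commits successfully, then the callback fires ...
    after()  # performs util.rename("journal", "undo")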
1918 1922 def instance(ui, path, create):
1919 1923 return localrepository(ui, util.drop_scheme('file', path), create)
1920 1924
1921 1925 def islocal(path):
1922 1926 return True
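instance() and islocal() form the small module-level protocol that a repository dispatcher (hg.py in Mercurial) can use to build a repo object for a given path; a hedged sketch of such a lookup, with open_repo_sketch being illustrative:

    import localrepo

    def open_repo_sketch(ui, path):
        # hypothetical dispatcher: pick a module by scheme, then delegate
        if localrepo.islocal(path):
            return localrepo.instance(ui, path, False)
        raise ValueError("non-local paths need another repository class")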
@@ -1,16 +1,22 b''
1 1 adding bar
2 2 adding foo
3 3 adding bomb
4 4 adding a.c
5 5 adding dir/a.o
6 6 adding dir/b.o
7 7 M dir/b.o
8 8 ! a.c
9 9 ! dir/a.o
10 10 ? .hgignore
11 11 a.c: unsupported file type (type is fifo)
12 12 ! a.c
13 13 # test absolute path through symlink outside repo
14 14 A f
15 15 # try symlink outside repo to file inside
16 16 abort: ../z not under root
17 # try cloning symlink in a subdir
18 1. commit a symlink
19 ? a/b/c/demo
20 adding a/b/c/demo
21 2. clone it
22 1 files updated, 0 files merged, 0 files removed, 0 files unresolved