Attempt to yield names in sorted order when walking....
Bryan O'Sullivan
r822:b678e6d4 default
@@ -1,1988 +1,1992 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 14 demandload(globals(), "bisect select")
15 15
16 16 class filelog(revlog):
17 17 def __init__(self, opener, path):
18 18 revlog.__init__(self, opener,
19 19 os.path.join("data", self.encodedir(path + ".i")),
20 20 os.path.join("data", self.encodedir(path + ".d")))
21 21
22 22 # This avoids a collision between a file named foo and a dir named
23 23 # foo.i or foo.d
24 24 def encodedir(self, path):
25 25 path = path.replace(".hg/", ".hg.hg/")
26 26 path = path.replace(".i/", ".i.hg/")
27 27 path = path.replace(".d/", ".d.hg/")
28 28 return path
29 29
30 30 def decodedir(self, path):
31 31 path = path.replace(".d.hg/", ".d/")
32 32 path = path.replace(".i.hg/", ".i/")
33 33 path = path.replace(".hg.hg/", ".hg/")
34 34 return path
35 35
36 36 def read(self, node):
37 37 t = self.revision(node)
38 38 if not t.startswith('\1\n'):
39 39 return t
40 40 s = t.find('\1\n', 2)
41 41 return t[s+2:]
42 42
43 43 def readmeta(self, node):
44 44 t = self.revision(node)
45 45 if not t.startswith('\1\n'):
46 46 return t
47 47 s = t.find('\1\n', 2)
48 48 m = {}
49 49 for l in t[2:s].splitlines():
50 50 k, v = l.split(": ", 1)
51 51 m[k] = v
52 52 return m
53 53
54 54 def add(self, text, meta, transaction, link, p1=None, p2=None):
55 55 if meta or text.startswith('\1\n'):
56 56 mt = ""
57 57 if meta:
58 58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
59 59 text = "\1\n" + "".join(mt) + "\1\n" + text
60 60 return self.addrevision(text, transaction, link, p1, p2)
61 61
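A standalone sketch of the "\1\n ... \1\n" metadata framing that add() writes and read()/readmeta() parse above; the helper names and sample values are illustrative, not part of hg.py.

def packmeta(meta, text):
    # prepend "key: value" lines between \1\n markers, as filelog.add() does
    if meta or text.startswith('\1\n'):
        lines = ["%s: %s\n" % (k, v) for k, v in sorted(meta.items())]
        return '\1\n' + "".join(lines) + '\1\n' + text
    return text

def unpackmeta(raw):
    # split a framed revision back into (metadata dict, file text)
    if not raw.startswith('\1\n'):
        return {}, raw
    end = raw.find('\1\n', 2)
    meta = {}
    for line in raw[2:end].splitlines():
        k, v = line.split(": ", 1)
        meta[k] = v
    return meta, raw[end + 2:]

framed = packmeta({"copy": "a.txt", "copyrev": "0" * 40}, "hello\n")
print unpackmeta(framed)   # ({'copy': 'a.txt', 'copyrev': '000...0'}, 'hello\n')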
62 62 def annotate(self, node):
63 63
64 64 def decorate(text, rev):
65 65 return ([rev] * len(text.splitlines()), text)
66 66
67 67 def pair(parent, child):
68 68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
69 69 child[0][b1:b2] = parent[0][a1:a2]
70 70 return child
71 71
72 72 # find all ancestors
73 73 needed = {node:1}
74 74 visit = [node]
75 75 while visit:
76 76 n = visit.pop(0)
77 77 for p in self.parents(n):
78 78 if p not in needed:
79 79 needed[p] = 1
80 80 visit.append(p)
81 81 else:
82 82 # count how many times we'll use this
83 83 needed[p] += 1
84 84
85 85 # sort by revision which is a topological order
86 86 visit = [ (self.rev(n), n) for n in needed.keys() ]
87 87 visit.sort()
88 88 hist = {}
89 89
90 90 for r,n in visit:
91 91 curr = decorate(self.read(n), self.linkrev(n))
92 92 for p in self.parents(n):
93 93 if p != nullid:
94 94 curr = pair(hist[p], curr)
95 95 # trim the history of unneeded revs
96 96 needed[p] -= 1
97 97 if not needed[p]:
98 98 del hist[p]
99 99 hist[n] = curr
100 100
101 101 return zip(hist[n][0], hist[n][1].splitlines(1))
102 102
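A toy illustration of the decorate/pair scheme annotate() uses above, with difflib.SequenceMatcher standing in for bdiff.blocks (an assumption made only so the sketch is self-contained); the texts and revision numbers are made up.

import difflib

def decorate(text, rev):
    return ([rev] * len(text.splitlines()), text)

def pair(parent, child):
    a, b = parent[1].splitlines(), child[1].splitlines()
    for tag, a1, a2, b1, b2 in difflib.SequenceMatcher(None, a, b).get_opcodes():
        if tag == 'equal':
            # lines unchanged since the parent keep the parent's revision
            child[0][b1:b2] = parent[0][a1:a2]
    return child

parent = decorate("one\ntwo\nthree\n", 0)
child = pair(parent, decorate("one\ntwo changed\nthree\n", 1))
print zip(child[0], child[1].splitlines(1))
# [(0, 'one\n'), (1, 'two changed\n'), (0, 'three\n')]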
103 103 class manifest(revlog):
104 104 def __init__(self, opener):
105 105 self.mapcache = None
106 106 self.listcache = None
107 107 self.addlist = None
108 108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
109 109
110 110 def read(self, node):
111 111 if node == nullid: return {} # don't upset local cache
112 112 if self.mapcache and self.mapcache[0] == node:
113 113 return self.mapcache[1]
114 114 text = self.revision(node)
115 115 map = {}
116 116 flag = {}
117 117 self.listcache = (text, text.splitlines(1))
118 118 for l in self.listcache[1]:
119 119 (f, n) = l.split('\0')
120 120 map[f] = bin(n[:40])
121 121 flag[f] = (n[40:-1] == "x")
122 122 self.mapcache = (node, map, flag)
123 123 return map
124 124
125 125 def readflags(self, node):
126 126 if node == nullid: return {} # don't upset local cache
127 127 if not self.mapcache or self.mapcache[0] != node:
128 128 self.read(node)
129 129 return self.mapcache[2]
130 130
131 131 def diff(self, a, b):
132 132 # this is sneaky, as we're not actually using a and b
133 133 if self.listcache and self.addlist and self.listcache[0] == a:
134 134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
135 135 if mdiff.patch(a, d) != b:
136 136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
137 137 return mdiff.textdiff(a, b)
138 138 return d
139 139 else:
140 140 return mdiff.textdiff(a, b)
141 141
142 142 def add(self, map, flags, transaction, link, p1=None, p2=None,
143 143 changed=None):
144 144 # directly generate the mdiff delta from the data collected during
145 145 # the bisect loop below
146 146 def gendelta(delta):
147 147 i = 0
148 148 result = []
149 149 while i < len(delta):
150 150 start = delta[i][2]
151 151 end = delta[i][3]
152 152 l = delta[i][4]
153 153 if l == None:
154 154 l = ""
155 155 while i < len(delta) - 1 and start <= delta[i+1][2] \
156 156 and end >= delta[i+1][2]:
157 157 if delta[i+1][3] > end:
158 158 end = delta[i+1][3]
159 159 if delta[i+1][4]:
160 160 l += delta[i+1][4]
161 161 i += 1
162 162 result.append(struct.pack(">lll", start, end, len(l)) + l)
163 163 i += 1
164 164 return result
165 165
166 166 # apply the changes collected during the bisect loop to our addlist
167 167 def addlistdelta(addlist, delta):
168 168 # apply the deltas to the addlist. start from the bottom up
169 169 # so changes to the offsets don't mess things up.
170 170 i = len(delta)
171 171 while i > 0:
172 172 i -= 1
173 173 start = delta[i][0]
174 174 end = delta[i][1]
175 175 if delta[i][4]:
176 176 addlist[start:end] = [delta[i][4]]
177 177 else:
178 178 del addlist[start:end]
179 179 return addlist
180 180
181 181 # calculate the byte offset of the start of each line in the
182 182 # manifest
183 183 def calcoffsets(addlist):
184 184 offsets = [0] * (len(addlist) + 1)
185 185 offset = 0
186 186 i = 0
187 187 while i < len(addlist):
188 188 offsets[i] = offset
189 189 offset += len(addlist[i])
190 190 i += 1
191 191 offsets[i] = offset
192 192 return offsets
193 193
194 194 # if we're using the listcache, make sure it is valid and
195 195 # parented by the same node we're diffing against
196 196 if not changed or not self.listcache or not p1 or \
197 197 self.mapcache[0] != p1:
198 198 files = map.keys()
199 199 files.sort()
200 200
201 201 self.addlist = ["%s\000%s%s\n" %
202 202 (f, hex(map[f]), flags[f] and "x" or '')
203 203 for f in files]
204 204 cachedelta = None
205 205 else:
206 206 addlist = self.listcache[1]
207 207
208 208 # find the starting offset for each line in the add list
209 209 offsets = calcoffsets(addlist)
210 210
211 211 # combine the changed lists into one list for sorting
212 212 work = [[x, 0] for x in changed[0]]
213 213 work[len(work):] = [[x, 1] for x in changed[1]]
214 214 work.sort()
215 215
216 216 delta = []
217 217 bs = 0
218 218
219 219 for w in work:
220 220 f = w[0]
221 221 # bs will either be the index of the item or the insert point
222 222 bs = bisect.bisect(addlist, f, bs)
223 223 if bs < len(addlist):
224 224 fn = addlist[bs][:addlist[bs].index('\0')]
225 225 else:
226 226 fn = None
227 227 if w[1] == 0:
228 228 l = "%s\000%s%s\n" % (f, hex(map[f]),
229 229 flags[f] and "x" or '')
230 230 else:
231 231 l = None
232 232 start = bs
233 233 if fn != f:
234 234 # item not found, insert a new one
235 235 end = bs
236 236 if w[1] == 1:
237 237 sys.stderr.write("failed to remove %s from manifest\n"
238 238 % f)
239 239 sys.exit(1)
240 240 else:
241 241 # item is found, replace/delete the existing line
242 242 end = bs + 1
243 243 delta.append([start, end, offsets[start], offsets[end], l])
244 244
245 245 self.addlist = addlistdelta(addlist, delta)
246 246 if self.mapcache[0] == self.tip():
247 247 cachedelta = "".join(gendelta(delta))
248 248 else:
249 249 cachedelta = None
250 250
251 251 text = "".join(self.addlist)
252 252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
253 253 sys.stderr.write("manifest delta failure\n")
254 254 sys.exit(1)
255 255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
256 256 self.mapcache = (n, map, flags)
257 257 self.listcache = (text, self.addlist)
258 258 self.addlist = None
259 259
260 260 return n
261 261
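A minimal sketch of the delta chunk format gendelta() builds above: each chunk is struct.pack(">lll", start, end, len(data)) + data, meaning "replace bytes [start:end) of the old text with data". applydelta() below is an illustrative stand-in for mdiff.patch, and the manifest lines are fabricated.

import struct

def applydelta(text, delta):
    out, last, pos = [], 0, 0
    while pos < len(delta):
        start, end, l = struct.unpack(">lll", delta[pos:pos + 12])
        data = delta[pos + 12:pos + 12 + l]
        out.append(text[last:start])   # keep the untouched prefix
        out.append(data)               # splice in the replacement
        last, pos = end, pos + 12 + l
    out.append(text[last:])
    return "".join(out)

old = "a\0n1\nb\0n2\nc\0n3\n"
chunk = "b\0n9\n"                       # new entry for the second line
delta = struct.pack(">lll", 5, 10, len(chunk)) + chunk
print applydelta(old, delta)            # a\0n1\nb\0n9\nc\0n3\n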
262 262 class changelog(revlog):
263 263 def __init__(self, opener):
264 264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265 265
266 266 def extract(self, text):
267 267 if not text:
268 268 return (nullid, "", "0", [], "")
269 269 last = text.index("\n\n")
270 270 desc = text[last + 2:]
271 271 l = text[:last].splitlines()
272 272 manifest = bin(l[0])
273 273 user = l[1]
274 274 date = l[2]
275 275 files = l[3:]
276 276 return (manifest, user, date, files, desc)
277 277
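The changelog entry layout that extract() parses above, spelled out on fabricated data: manifest hex on the first line, then user, then date ("unixtime timezone-offset"), then the changed files, a blank line, and the description.

entry = "\n".join([
    "a" * 40,                       # manifest node as hex
    "Jane Doe <jane@example.com>",  # user
    "1120000000 0",                 # date: "unixtime timezone-offset"
    "hg.py",                        # changed files, one per line
    "",
    "fix a bug in dirstate.walk",   # description
])

last = entry.index("\n\n")
desc = entry[last + 2:]
lines = entry[:last].splitlines()
print lines[0] == "a" * 40, lines[1], lines[2], lines[3:], repr(desc)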
278 278 def read(self, node):
279 279 return self.extract(self.revision(node))
280 280
281 281 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
282 282 user=None, date=None):
283 283 date = date or "%d %d" % (time.time(), time.timezone)
284 284 list.sort()
285 285 l = [hex(manifest), user, date] + list + ["", desc]
286 286 text = "\n".join(l)
287 287 return self.addrevision(text, transaction, self.count(), p1, p2)
288 288
289 289 class dirstate:
290 290 def __init__(self, opener, ui, root):
291 291 self.opener = opener
292 292 self.root = root
293 293 self.dirty = 0
294 294 self.ui = ui
295 295 self.map = None
296 296 self.pl = None
297 297 self.copies = {}
298 298 self.ignorefunc = None
299 299
300 300 def wjoin(self, f):
301 301 return os.path.join(self.root, f)
302 302
303 303 def ignore(self, f):
304 304 if not self.ignorefunc:
305 305 bigpat = []
306 306 try:
307 307 l = file(self.wjoin(".hgignore"))
308 308 for pat in l:
309 309 if pat != "\n":
310 310 p = util.pconvert(pat[:-1])
311 311 try:
312 312 r = re.compile(p)
313 313 except:
314 314 self.ui.warn("ignoring invalid ignore"
315 315 + " regular expression '%s'\n" % p)
316 316 else:
317 317 bigpat.append(util.pconvert(pat[:-1]))
318 318 except IOError: pass
319 319
320 320 if bigpat:
321 321 s = "(?:%s)" % (")|(?:".join(bigpat))
322 322 r = re.compile(s)
323 323 self.ignorefunc = r.search
324 324 else:
325 325 self.ignorefunc = util.never
326 326
327 327 return self.ignorefunc(f)
328 328
329 329 def __del__(self):
330 330 if self.dirty:
331 331 self.write()
332 332
333 333 def __getitem__(self, key):
334 334 try:
335 335 return self.map[key]
336 336 except TypeError:
337 337 self.read()
338 338 return self[key]
339 339
340 340 def __contains__(self, key):
341 341 if not self.map: self.read()
342 342 return key in self.map
343 343
344 344 def parents(self):
345 345 if not self.pl:
346 346 self.read()
347 347 return self.pl
348 348
349 349 def markdirty(self):
350 350 if not self.dirty:
351 351 self.dirty = 1
352 352
353 353 def setparents(self, p1, p2 = nullid):
354 354 self.markdirty()
355 355 self.pl = p1, p2
356 356
357 357 def state(self, key):
358 358 try:
359 359 return self[key][0]
360 360 except KeyError:
361 361 return "?"
362 362
363 363 def read(self):
364 364 if self.map is not None: return self.map
365 365
366 366 self.map = {}
367 367 self.pl = [nullid, nullid]
368 368 try:
369 369 st = self.opener("dirstate").read()
370 370 if not st: return
371 371 except: return
372 372
373 373 self.pl = [st[:20], st[20: 40]]
374 374
375 375 pos = 40
376 376 while pos < len(st):
377 377 e = struct.unpack(">cllll", st[pos:pos+17])
378 378 l = e[4]
379 379 pos += 17
380 380 f = st[pos:pos + l]
381 381 if '\0' in f:
382 382 f, c = f.split('\0')
383 383 self.copies[f] = c
384 384 self.map[f] = e[:4]
385 385 pos += l
386 386
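A standalone sketch of the dirstate layout read() parses above: two 20-byte parent nodes, then one ">cllll" record per file (state, mode, size, mtime, name length) followed by the name, with an optional "\0copy-source" suffix. All values below are fabricated.

import struct

def packentry(state, mode, size, mtime, name, copysource=None):
    if copysource:
        name = name + "\0" + copysource
    return struct.pack(">cllll", state, mode, size, mtime, len(name)) + name

raw = ("\0" * 20) * 2                              # two null parents
raw += packentry("n", 0644, 12, 1120000000, "a.txt")
raw += packentry("a", 0644, 0, 0, "b.txt", copysource="a.txt")

pos, entries, copies = 40, {}, {}
while pos < len(raw):
    e = struct.unpack(">cllll", raw[pos:pos + 17])
    pos += 17
    name = raw[pos:pos + e[4]]
    pos += e[4]
    if "\0" in name:
        name, src = name.split("\0")
        copies[name] = src
    entries[name] = e[:4]
print entries, copies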
387 387 def copy(self, source, dest):
388 388 self.read()
389 389 self.markdirty()
390 390 self.copies[dest] = source
391 391
392 392 def copied(self, file):
393 393 return self.copies.get(file, None)
394 394
395 395 def update(self, files, state):
396 396 ''' current states:
397 397 n normal
398 398 m needs merging
399 399 r marked for removal
400 400 a marked for addition'''
401 401
402 402 if not files: return
403 403 self.read()
404 404 self.markdirty()
405 405 for f in files:
406 406 if state == "r":
407 407 self.map[f] = ('r', 0, 0, 0)
408 408 else:
409 409 s = os.stat(os.path.join(self.root, f))
410 410 self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)
411 411
412 412 def forget(self, files):
413 413 if not files: return
414 414 self.read()
415 415 self.markdirty()
416 416 for f in files:
417 417 try:
418 418 del self.map[f]
419 419 except KeyError:
420 420 self.ui.warn("not in dirstate: %s!\n" % f)
421 421 pass
422 422
423 423 def clear(self):
424 424 self.map = {}
425 425 self.markdirty()
426 426
427 427 def write(self):
428 428 st = self.opener("dirstate", "w")
429 429 st.write("".join(self.pl))
430 430 for f, e in self.map.items():
431 431 c = self.copied(f)
432 432 if c:
433 433 f = f + "\0" + c
434 434 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
435 435 st.write(e + f)
436 436 self.dirty = 0
437 437
438 438 def walk(self, files = None, match = util.always):
439 439 self.read()
440 440 dc = self.map.copy()
441 441 # walk all files by default
442 442 if not files: files = [self.root]
443 443 known = {'.hg': 1}
444 444 def seen(fn):
445 445 if fn in known: return True
446 446 known[fn] = 1
447 447 def traverse():
448 448 for f in util.unique(files):
449 449 f = os.path.join(self.root, f)
450 450 if os.path.isdir(f):
451 451 for dir, subdirs, fl in os.walk(f):
452 452 d = dir[len(self.root) + 1:]
453 453 nd = os.path.normpath(d)
454 454 if seen(nd):
455 455 subdirs[:] = []
456 456 continue
457 457 for sd in subdirs:
458 458 ds = os.path.join(nd, sd +'/')
459 459 if self.ignore(ds) or not match(ds):
460 460 subdirs.remove(sd)
461 subdirs.sort()
462 fl.sort()
461 463 for fn in fl:
462 464 fn = util.pconvert(os.path.join(d, fn))
463 465 yield 'f', fn
464 466 else:
465 467 yield 'f', f[len(self.root) + 1:]
466 468
467 for k in dc.keys():
469 ks = dc.keys()
470 ks.sort()
471 for k in ks:
468 472 yield 'm', k
469 473
470 474 # yield only files that match: all in dirstate, others only if
471 475 # not in .hgignore
472 476
473 477 for src, fn in util.unique(traverse()):
474 478 fn = os.path.normpath(fn)
475 479 if seen(fn): continue
476 480 if fn in dc:
477 481 del dc[fn]
478 482 elif self.ignore(fn):
479 483 continue
480 484 if match(fn):
481 485 yield src, fn
482 486
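A minimal sketch of the sorted traversal this changeset adds to walk(): sorting subdirs[:] in place makes os.walk() descend into directories in order, and sorting the filenames (and the leftover dirstate keys) makes the generator yield names deterministically. The root directory below is an assumption.

import os

def sortedwalk(root):
    for dirpath, subdirs, files in os.walk(root):
        subdirs.sort()    # visit child directories in sorted order
        files.sort()      # yield files within a directory in sorted order
        for fn in files:
            yield os.path.join(dirpath, fn)

for fn in sortedwalk("."):
    print fn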
483 487 def changes(self, files = None, match = util.always):
484 488 self.read()
485 489 dc = self.map.copy()
486 490 lookup, changed, added, unknown = [], [], [], []
487 491
488 492 for src, fn in self.walk(files, match):
489 493 try: s = os.stat(os.path.join(self.root, fn))
490 494 except: continue
491 495
492 496 if fn in dc:
493 497 c = dc[fn]
494 498 del dc[fn]
495 499
496 500 if c[0] == 'm':
497 501 changed.append(fn)
498 502 elif c[0] == 'a':
499 503 added.append(fn)
500 504 elif c[0] == 'r':
501 505 unknown.append(fn)
502 506 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
503 507 changed.append(fn)
504 508 elif c[1] != s.st_mode or c[3] != s.st_mtime:
505 509 lookup.append(fn)
506 510 else:
507 511 if match(fn): unknown.append(fn)
508 512
509 513 return (lookup, changed, added, filter(match, dc.keys()), unknown)
510 514
511 515 # used to avoid circular references so destructors work
512 516 def opener(base):
513 517 p = base
514 518 def o(path, mode="r"):
515 519 if p.startswith("http://"):
516 520 f = os.path.join(p, urllib.quote(path))
517 521 return httprangereader.httprangereader(f)
518 522
519 523 f = os.path.join(p, path)
520 524
521 525 mode += "b" # for that other OS
522 526
523 527 if mode[0] != "r":
524 528 try:
525 529 s = os.stat(f)
526 530 except OSError:
527 531 d = os.path.dirname(f)
528 532 if not os.path.isdir(d):
529 533 os.makedirs(d)
530 534 else:
531 535 if s.st_nlink > 1:
532 536 file(f + ".tmp", "wb").write(file(f, "rb").read())
533 537 util.rename(f+".tmp", f)
534 538
535 539 return file(f, mode)
536 540
537 541 return o
538 542
539 543 class RepoError(Exception): pass
540 544
541 545 class localrepository:
542 546 def __init__(self, ui, path=None, create=0):
543 547 self.remote = 0
544 548 if path and path.startswith("http://"):
545 549 self.remote = 1
546 550 self.path = path
547 551 else:
548 552 if not path:
549 553 p = os.getcwd()
550 554 while not os.path.isdir(os.path.join(p, ".hg")):
551 555 oldp = p
552 556 p = os.path.dirname(p)
553 557 if p == oldp: raise RepoError("no repo found")
554 558 path = p
555 559 self.path = os.path.join(path, ".hg")
556 560
557 561 if not create and not os.path.isdir(self.path):
558 562 raise RepoError("repository %s not found" % self.path)
559 563
560 564 self.root = path
561 565 self.ui = ui
562 566
563 567 if create:
564 568 os.mkdir(self.path)
565 569 os.mkdir(self.join("data"))
566 570
567 571 self.opener = opener(self.path)
568 572 self.wopener = opener(self.root)
569 573 self.manifest = manifest(self.opener)
570 574 self.changelog = changelog(self.opener)
571 575 self.tagscache = None
572 576 self.nodetagscache = None
573 577
574 578 if not self.remote:
575 579 self.dirstate = dirstate(self.opener, ui, self.root)
576 580 try:
577 581 self.ui.readconfig(self.opener("hgrc"))
578 582 except IOError: pass
579 583
580 584 def hook(self, name, **args):
581 585 s = self.ui.config("hooks", name)
582 586 if s:
583 587 self.ui.note("running hook %s: %s\n" % (name, s))
584 588 old = {}
585 589 for k, v in args.items():
586 590 k = k.upper()
587 591 old[k] = os.environ.get(k, None)
588 592 os.environ[k] = v
589 593
590 594 r = os.system(s)
591 595
592 596 for k, v in old.items():
593 597 if v != None:
594 598 os.environ[k] = v
595 599 else:
596 600 del os.environ[k]
597 601
598 602 if r:
599 603 self.ui.warn("abort: %s hook failed with status %d!\n" %
600 604 (name, r))
601 605 return False
602 606 return True
603 607
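A small sketch of the hook-running pattern above: export the keyword arguments as environment variables, run the configured command with os.system, and restore the previous environment afterwards. The echo command is illustrative only.

import os

def runhook(cmd, **args):
    old = {}
    for k, v in args.items():
        k = k.upper()
        old[k] = os.environ.get(k)
        os.environ[k] = v
    try:
        return os.system(cmd) == 0          # True when the hook succeeded
    finally:
        for k, v in old.items():
            if v is not None:
                os.environ[k] = v
            else:
                del os.environ[k]

print runhook("echo commit $NODE", node="b678e6d4")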
604 608 def tags(self):
605 609 '''return a mapping of tag to node'''
606 610 if not self.tagscache:
607 611 self.tagscache = {}
608 612 def addtag(self, k, n):
609 613 try:
610 614 bin_n = bin(n)
611 615 except TypeError:
612 616 bin_n = ''
613 617 self.tagscache[k.strip()] = bin_n
614 618
615 619 try:
616 620 # read each head of the tags file, ending with the tip
617 621 # and add each tag found to the map, with "newer" ones
618 622 # taking precedence
619 623 fl = self.file(".hgtags")
620 624 h = fl.heads()
621 625 h.reverse()
622 626 for r in h:
623 627 for l in fl.revision(r).splitlines():
624 628 if l:
625 629 n, k = l.split(" ", 1)
626 630 addtag(self, k, n)
627 631 except KeyError:
628 632 pass
629 633
630 634 try:
631 635 f = self.opener("localtags")
632 636 for l in f:
633 637 n, k = l.split(" ", 1)
634 638 addtag(self, k, n)
635 639 except IOError:
636 640 pass
637 641
638 642 self.tagscache['tip'] = self.changelog.tip()
639 643
640 644 return self.tagscache
641 645
642 646 def tagslist(self):
643 647 '''return a list of tags ordered by revision'''
644 648 l = []
645 649 for t, n in self.tags().items():
646 650 try:
647 651 r = self.changelog.rev(n)
648 652 except:
649 653 r = -2 # sort to the beginning of the list if unknown
650 654 l.append((r,t,n))
651 655 l.sort()
652 656 return [(t,n) for r,t,n in l]
653 657
654 658 def nodetags(self, node):
655 659 '''return the tags associated with a node'''
656 660 if not self.nodetagscache:
657 661 self.nodetagscache = {}
658 662 for t,n in self.tags().items():
659 663 self.nodetagscache.setdefault(n,[]).append(t)
660 664 return self.nodetagscache.get(node, [])
661 665
662 666 def lookup(self, key):
663 667 try:
664 668 return self.tags()[key]
665 669 except KeyError:
666 670 try:
667 671 return self.changelog.lookup(key)
668 672 except:
669 673 raise RepoError("unknown revision '%s'" % key)
670 674
671 675 def dev(self):
672 676 if self.remote: return -1
673 677 return os.stat(self.path).st_dev
674 678
675 679 def join(self, f):
676 680 return os.path.join(self.path, f)
677 681
678 682 def wjoin(self, f):
679 683 return os.path.join(self.root, f)
680 684
681 685 def file(self, f):
682 686 if f[0] == '/': f = f[1:]
683 687 return filelog(self.opener, f)
684 688
685 689 def getcwd(self):
686 690 cwd = os.getcwd()
687 691 if cwd == self.root: return ''
688 692 return cwd[len(self.root) + 1:]
689 693
690 694 def wfile(self, f, mode='r'):
691 695 return self.wopener(f, mode)
692 696
693 697 def transaction(self):
694 698 # save dirstate for undo
695 699 try:
696 700 ds = self.opener("dirstate").read()
697 701 except IOError:
698 702 ds = ""
699 703 self.opener("journal.dirstate", "w").write(ds)
700 704
701 705 def after():
702 706 util.rename(self.join("journal"), self.join("undo"))
703 707 util.rename(self.join("journal.dirstate"),
704 708 self.join("undo.dirstate"))
705 709
706 710 return transaction.transaction(self.ui.warn, self.opener,
707 711 self.join("journal"), after)
708 712
709 713 def recover(self):
710 714 lock = self.lock()
711 715 if os.path.exists(self.join("journal")):
712 716 self.ui.status("rolling back interrupted transaction\n")
713 717 return transaction.rollback(self.opener, self.join("journal"))
714 718 else:
715 719 self.ui.warn("no interrupted transaction available\n")
716 720
717 721 def undo(self):
718 722 lock = self.lock()
719 723 if os.path.exists(self.join("undo")):
720 724 self.ui.status("rolling back last transaction\n")
721 725 transaction.rollback(self.opener, self.join("undo"))
722 726 self.dirstate = None
723 727 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
724 728 self.dirstate = dirstate(self.opener, self.ui, self.root)
725 729 else:
726 730 self.ui.warn("no undo information available\n")
727 731
728 732 def lock(self, wait = 1):
729 733 try:
730 734 return lock.lock(self.join("lock"), 0)
731 735 except lock.LockHeld, inst:
732 736 if wait:
733 737 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
734 738 return lock.lock(self.join("lock"), wait)
735 739 raise inst
736 740
737 741 def rawcommit(self, files, text, user, date, p1=None, p2=None):
738 742 orig_parent = self.dirstate.parents()[0] or nullid
739 743 p1 = p1 or self.dirstate.parents()[0] or nullid
740 744 p2 = p2 or self.dirstate.parents()[1] or nullid
741 745 c1 = self.changelog.read(p1)
742 746 c2 = self.changelog.read(p2)
743 747 m1 = self.manifest.read(c1[0])
744 748 mf1 = self.manifest.readflags(c1[0])
745 749 m2 = self.manifest.read(c2[0])
746 750
747 751 if orig_parent == p1:
748 752 update_dirstate = 1
749 753 else:
750 754 update_dirstate = 0
751 755
752 756 tr = self.transaction()
753 757 mm = m1.copy()
754 758 mfm = mf1.copy()
755 759 linkrev = self.changelog.count()
756 760 for f in files:
757 761 try:
758 762 t = self.wfile(f).read()
759 763 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
760 764 r = self.file(f)
761 765 mfm[f] = tm
762 766 mm[f] = r.add(t, {}, tr, linkrev,
763 767 m1.get(f, nullid), m2.get(f, nullid))
764 768 if update_dirstate:
765 769 self.dirstate.update([f], "n")
766 770 except IOError:
767 771 try:
768 772 del mm[f]
769 773 del mfm[f]
770 774 if update_dirstate:
771 775 self.dirstate.forget([f])
772 776 except:
773 777 # deleted from p2?
774 778 pass
775 779
776 780 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
777 781 user = user or self.ui.username()
778 782 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
779 783 tr.close()
780 784 if update_dirstate:
781 785 self.dirstate.setparents(n, nullid)
782 786
783 787 def commit(self, files = None, text = "", user = None, date = None,
784 788 match = util.always):
785 789 commit = []
786 790 remove = []
787 791 if files:
788 792 for f in files:
789 793 s = self.dirstate.state(f)
790 794 if s in 'nmai':
791 795 commit.append(f)
792 796 elif s == 'r':
793 797 remove.append(f)
794 798 else:
795 799 self.ui.warn("%s not tracked!\n" % f)
796 800 else:
797 801 (c, a, d, u) = self.changes(match = match)
798 802 commit = c + a
799 803 remove = d
800 804
801 805 if not commit and not remove:
802 806 self.ui.status("nothing changed\n")
803 807 return
804 808
805 809 if not self.hook("precommit"):
806 810 return 1
807 811
808 812 p1, p2 = self.dirstate.parents()
809 813 c1 = self.changelog.read(p1)
810 814 c2 = self.changelog.read(p2)
811 815 m1 = self.manifest.read(c1[0])
812 816 mf1 = self.manifest.readflags(c1[0])
813 817 m2 = self.manifest.read(c2[0])
814 818 lock = self.lock()
815 819 tr = self.transaction()
816 820
817 821 # check in files
818 822 new = {}
819 823 linkrev = self.changelog.count()
820 824 commit.sort()
821 825 for f in commit:
822 826 self.ui.note(f + "\n")
823 827 try:
824 828 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
825 829 t = self.wfile(f).read()
826 830 except IOError:
827 831 self.ui.warn("trouble committing %s!\n" % f)
828 832 raise
829 833
830 834 meta = {}
831 835 cp = self.dirstate.copied(f)
832 836 if cp:
833 837 meta["copy"] = cp
834 838 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
835 839 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
836 840
837 841 r = self.file(f)
838 842 fp1 = m1.get(f, nullid)
839 843 fp2 = m2.get(f, nullid)
840 844 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
841 845
842 846 # update manifest
843 847 m1.update(new)
844 848 for f in remove:
845 849 if f in m1:
846 850 del m1[f]
847 851 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
848 852 (new, remove))
849 853
850 854 # add changeset
851 855 new = new.keys()
852 856 new.sort()
853 857
854 858 if not text:
855 859 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
856 860 edittext += "".join(["HG: changed %s\n" % f for f in new])
857 861 edittext += "".join(["HG: removed %s\n" % f for f in remove])
858 862 edittext = self.ui.edit(edittext)
859 863 if not edittext.rstrip():
860 864 return 1
861 865 text = edittext
862 866
863 867 user = user or self.ui.username()
864 868 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
865 869
866 870 tr.close()
867 871
868 872 self.dirstate.setparents(n)
869 873 self.dirstate.update(new, "n")
870 874 self.dirstate.forget(remove)
871 875
872 876 if not self.hook("commit", node=hex(n)):
873 877 return 1
874 878
875 879 def walk(self, node = None, files = [], match = util.always):
876 880 if node:
877 881 for fn in self.manifest.read(self.changelog.read(node)[0]):
878 882 if match(fn): yield 'm', fn
879 883 else:
880 884 for src, fn in self.dirstate.walk(files, match):
881 885 yield src, fn
882 886
883 887 def changes(self, node1 = None, node2 = None, files = [],
884 888 match = util.always):
885 889 mf2, u = None, []
886 890
887 891 def fcmp(fn, mf):
888 892 t1 = self.wfile(fn).read()
889 893 t2 = self.file(fn).revision(mf[fn])
890 894 return cmp(t1, t2)
891 895
892 896 def mfmatches(node):
893 897 mf = dict(self.manifest.read(node))
894 898 for fn in mf.keys():
895 899 if not match(fn):
896 900 del mf[fn]
897 901 return mf
898 902
899 903 # are we comparing the working directory?
900 904 if not node2:
901 905 l, c, a, d, u = self.dirstate.changes(files, match)
902 906
903 907 # are we comparing working dir against its parent?
904 908 if not node1:
905 909 if l:
906 910 # do a full compare of any files that might have changed
907 911 change = self.changelog.read(self.dirstate.parents()[0])
908 912 mf2 = mfmatches(change[0])
909 913 for f in l:
910 914 if fcmp(f, mf2):
911 915 c.append(f)
912 916
913 917 for l in c, a, d, u:
914 918 l.sort()
915 919
916 920 return (c, a, d, u)
917 921
918 922 # are we comparing working dir against non-tip?
919 923 # generate a pseudo-manifest for the working dir
920 924 if not node2:
921 925 if not mf2:
922 926 change = self.changelog.read(self.dirstate.parents()[0])
923 927 mf2 = mfmatches(change[0])
924 928 for f in a + c + l:
925 929 mf2[f] = ""
926 930 for f in d:
927 931 if f in mf2: del mf2[f]
928 932 else:
929 933 change = self.changelog.read(node2)
930 934 mf2 = mfmatches(change[0])
931 935
932 936 # flush lists from dirstate before comparing manifests
933 937 c, a = [], []
934 938
935 939 change = self.changelog.read(node1)
936 940 mf1 = mfmatches(change[0])
937 941
938 942 for fn in mf2:
939 943 if mf1.has_key(fn):
940 944 if mf1[fn] != mf2[fn]:
941 945 if mf2[fn] != "" or fcmp(fn, mf1):
942 946 c.append(fn)
943 947 del mf1[fn]
944 948 else:
945 949 a.append(fn)
946 950
947 951 d = mf1.keys()
948 952
949 953 for l in c, a, d, u:
950 954 l.sort()
951 955
952 956 return (c, a, d, u)
953 957
954 958 def add(self, list):
955 959 for f in list:
956 960 p = self.wjoin(f)
957 961 if not os.path.exists(p):
958 962 self.ui.warn("%s does not exist!\n" % f)
959 963 elif not os.path.isfile(p):
960 964 self.ui.warn("%s not added: only files supported currently\n" % f)
961 965 elif self.dirstate.state(f) in 'an':
962 966 self.ui.warn("%s already tracked!\n" % f)
963 967 else:
964 968 self.dirstate.update([f], "a")
965 969
966 970 def forget(self, list):
967 971 for f in list:
968 972 if self.dirstate.state(f) not in 'ai':
969 973 self.ui.warn("%s not added!\n" % f)
970 974 else:
971 975 self.dirstate.forget([f])
972 976
973 977 def remove(self, list):
974 978 for f in list:
975 979 p = self.wjoin(f)
976 980 if os.path.exists(p):
977 981 self.ui.warn("%s still exists!\n" % f)
978 982 elif self.dirstate.state(f) == 'a':
979 983 self.ui.warn("%s never committed!\n" % f)
980 984 self.dirstate.forget([f])
981 985 elif f not in self.dirstate:
982 986 self.ui.warn("%s not tracked!\n" % f)
983 987 else:
984 988 self.dirstate.update([f], "r")
985 989
986 990 def copy(self, source, dest):
987 991 p = self.wjoin(dest)
988 992 if not os.path.exists(p):
989 993 self.ui.warn("%s does not exist!\n" % dest)
990 994 elif not os.path.isfile(p):
991 995 self.ui.warn("copy failed: %s is not a file\n" % dest)
992 996 else:
993 997 if self.dirstate.state(dest) == '?':
994 998 self.dirstate.update([dest], "a")
995 999 self.dirstate.copy(source, dest)
996 1000
997 1001 def heads(self):
998 1002 return self.changelog.heads()
999 1003
1000 1004 def branches(self, nodes):
1001 1005 if not nodes: nodes = [self.changelog.tip()]
1002 1006 b = []
1003 1007 for n in nodes:
1004 1008 t = n
1005 1009 while n:
1006 1010 p = self.changelog.parents(n)
1007 1011 if p[1] != nullid or p[0] == nullid:
1008 1012 b.append((t, n, p[0], p[1]))
1009 1013 break
1010 1014 n = p[0]
1011 1015 return b
1012 1016
1013 1017 def between(self, pairs):
1014 1018 r = []
1015 1019
1016 1020 for top, bottom in pairs:
1017 1021 n, l, i = top, [], 0
1018 1022 f = 1
1019 1023
1020 1024 while n != bottom:
1021 1025 p = self.changelog.parents(n)[0]
1022 1026 if i == f:
1023 1027 l.append(n)
1024 1028 f = f * 2
1025 1029 n = p
1026 1030 i += 1
1027 1031
1028 1032 r.append(l)
1029 1033
1030 1034 return r
1031 1035
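A sketch of the exponentially spaced sampling between() performs above: walking first parents from top towards bottom, it records the nodes at distances 1, 2, 4, 8, ... so a later binary search can narrow in on the oldest unknown revision. Here revisions are plain integers and the parent of r is r - 1, an assumption made only for illustration.

def between(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            l.append(n)
            f *= 2
        n -= 1            # stand-in for "first parent of n"
        i += 1
    return l

print between(100, 0)     # [99, 98, 96, 92, 84, 68, 36]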
1032 1036 def newer(self, nodes):
1033 1037 m = {}
1034 1038 nl = []
1035 1039 pm = {}
1036 1040 cl = self.changelog
1037 1041 t = l = cl.count()
1038 1042
1039 1043 # find the lowest numbered node
1040 1044 for n in nodes:
1041 1045 l = min(l, cl.rev(n))
1042 1046 m[n] = 1
1043 1047
1044 1048 for i in xrange(l, t):
1045 1049 n = cl.node(i)
1046 1050 if n in m: # explicitly listed
1047 1051 pm[n] = 1
1048 1052 nl.append(n)
1049 1053 continue
1050 1054 for p in cl.parents(n):
1051 1055 if p in pm: # parent listed
1052 1056 pm[n] = 1
1053 1057 nl.append(n)
1054 1058 break
1055 1059
1056 1060 return nl
1057 1061
1058 1062 def findincoming(self, remote, base={}):
1059 1063 m = self.changelog.nodemap
1060 1064 search = []
1061 1065 fetch = []
1062 1066 seen = {}
1063 1067 seenbranch = {}
1064 1068
1065 1069 # assume we're closer to the tip than the root
1066 1070 # and start by examining the heads
1067 1071 self.ui.status("searching for changes\n")
1068 1072 heads = remote.heads()
1069 1073 unknown = []
1070 1074 for h in heads:
1071 1075 if h not in m:
1072 1076 unknown.append(h)
1073 1077 else:
1074 1078 base[h] = 1
1075 1079
1076 1080 if not unknown:
1077 1081 return None
1078 1082
1079 1083 rep = {}
1080 1084 reqcnt = 0
1081 1085
1082 1086 # search through remote branches
1083 1087 # a 'branch' here is a linear segment of history, with four parts:
1084 1088 # head, root, first parent, second parent
1085 1089 # (a branch always has two parents (or none) by definition)
1086 1090 unknown = remote.branches(unknown)
1087 1091 while unknown:
1088 1092 r = []
1089 1093 while unknown:
1090 1094 n = unknown.pop(0)
1091 1095 if n[0] in seen:
1092 1096 continue
1093 1097
1094 1098 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1095 1099 if n[0] == nullid:
1096 1100 break
1097 1101 if n in seenbranch:
1098 1102 self.ui.debug("branch already found\n")
1099 1103 continue
1100 1104 if n[1] and n[1] in m: # do we know the base?
1101 1105 self.ui.debug("found incomplete branch %s:%s\n"
1102 1106 % (short(n[0]), short(n[1])))
1103 1107 search.append(n) # schedule branch range for scanning
1104 1108 seenbranch[n] = 1
1105 1109 else:
1106 1110 if n[1] not in seen and n[1] not in fetch:
1107 1111 if n[2] in m and n[3] in m:
1108 1112 self.ui.debug("found new changeset %s\n" %
1109 1113 short(n[1]))
1110 1114 fetch.append(n[1]) # earliest unknown
1111 1115 base[n[2]] = 1 # latest known
1112 1116 continue
1113 1117
1114 1118 for a in n[2:4]:
1115 1119 if a not in rep:
1116 1120 r.append(a)
1117 1121 rep[a] = 1
1118 1122
1119 1123 seen[n[0]] = 1
1120 1124
1121 1125 if r:
1122 1126 reqcnt += 1
1123 1127 self.ui.debug("request %d: %s\n" %
1124 1128 (reqcnt, " ".join(map(short, r))))
1125 1129 for p in range(0, len(r), 10):
1126 1130 for b in remote.branches(r[p:p+10]):
1127 1131 self.ui.debug("received %s:%s\n" %
1128 1132 (short(b[0]), short(b[1])))
1129 1133 if b[0] not in m and b[0] not in seen:
1130 1134 unknown.append(b)
1131 1135
1132 1136 # do binary search on the branches we found
1133 1137 while search:
1134 1138 n = search.pop(0)
1135 1139 reqcnt += 1
1136 1140 l = remote.between([(n[0], n[1])])[0]
1137 1141 l.append(n[1])
1138 1142 p = n[0]
1139 1143 f = 1
1140 1144 for i in l:
1141 1145 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1142 1146 if i in m:
1143 1147 if f <= 2:
1144 1148 self.ui.debug("found new branch changeset %s\n" %
1145 1149 short(p))
1146 1150 fetch.append(p)
1147 1151 base[i] = 1
1148 1152 else:
1149 1153 self.ui.debug("narrowed branch search to %s:%s\n"
1150 1154 % (short(p), short(i)))
1151 1155 search.append((p, i))
1152 1156 break
1153 1157 p, f = i, f * 2
1154 1158
1155 1159 # sanity check our fetch list
1156 1160 for f in fetch:
1157 1161 if f in m:
1158 1162 raise RepoError("already have changeset " + short(f))
1159 1163
1160 1164 if base.keys() == [nullid]:
1161 1165 self.ui.warn("warning: pulling from an unrelated repository!\n")
1162 1166
1163 1167 self.ui.note("adding new changesets starting at " +
1164 1168 " ".join([short(f) for f in fetch]) + "\n")
1165 1169
1166 1170 self.ui.debug("%d total queries\n" % reqcnt)
1167 1171
1168 1172 return fetch
1169 1173
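A toy model of the narrowing step in findincoming() above: given a linear remote branch, sample it at exponentially growing distances from the head (as between() does) and keep halving the window until the boundary between locally known and unknown revisions is found. The integer revisions and the known set are fabricated.

def findboundary(head, base, known):
    # return the oldest revision on the branch that we do not have yet
    search, fetch = [(head, base)], None
    while search:
        top, bottom = search.pop(0)
        l, n, i, f = [], top, 0, 1
        while n != bottom:                 # sample at distances 1, 2, 4, ...
            if i == f:
                l.append(n)
                f *= 2
            n -= 1
            i += 1
        l.append(bottom)
        p, f = top, 1
        for i in l:
            if i in known:
                if f <= 2:
                    fetch = p              # p is the earliest unknown rev
                else:
                    search.append((p, i))  # narrow to the (p, i) window
                break
            p, f = i, f * 2
    return fetch

print findboundary(100, 0, set(range(57)))   # 57: revs 0..56 are known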
1170 1174 def findoutgoing(self, remote):
1171 1175 base = {}
1172 1176 self.findincoming(remote, base)
1173 1177 remain = dict.fromkeys(self.changelog.nodemap)
1174 1178
1175 1179 # prune everything remote has from the tree
1176 1180 del remain[nullid]
1177 1181 remove = base.keys()
1178 1182 while remove:
1179 1183 n = remove.pop(0)
1180 1184 if n in remain:
1181 1185 del remain[n]
1182 1186 for p in self.changelog.parents(n):
1183 1187 remove.append(p)
1184 1188
1185 1189 # find every node whose parents have been pruned
1186 1190 subset = []
1187 1191 for n in remain:
1188 1192 p1, p2 = self.changelog.parents(n)
1189 1193 if p1 not in remain and p2 not in remain:
1190 1194 subset.append(n)
1191 1195
1192 1196 # this is the set of all roots we have to push
1193 1197 return subset
1194 1198
1195 1199 def pull(self, remote):
1196 1200 lock = self.lock()
1197 1201
1198 1202 # if we have an empty repo, fetch everything
1199 1203 if self.changelog.tip() == nullid:
1200 1204 self.ui.status("requesting all changes\n")
1201 1205 fetch = [nullid]
1202 1206 else:
1203 1207 fetch = self.findincoming(remote)
1204 1208
1205 1209 if not fetch:
1206 1210 self.ui.status("no changes found\n")
1207 1211 return 1
1208 1212
1209 1213 cg = remote.changegroup(fetch)
1210 1214 return self.addchangegroup(cg)
1211 1215
1212 1216 def push(self, remote):
1213 1217 lock = remote.lock()
1214 1218 update = self.findoutgoing(remote)
1215 1219 if not update:
1216 1220 self.ui.status("no changes found\n")
1217 1221 return 1
1218 1222
1219 1223 cg = self.changegroup(update)
1220 1224 return remote.addchangegroup(cg)
1221 1225
1222 1226 def changegroup(self, basenodes):
1223 1227 class genread:
1224 1228 def __init__(self, generator):
1225 1229 self.g = generator
1226 1230 self.buf = ""
1227 1231 def read(self, l):
1228 1232 while l > len(self.buf):
1229 1233 try:
1230 1234 self.buf += self.g.next()
1231 1235 except StopIteration:
1232 1236 break
1233 1237 d, self.buf = self.buf[:l], self.buf[l:]
1234 1238 return d
1235 1239
1236 1240 def gengroup():
1237 1241 nodes = self.newer(basenodes)
1238 1242
1239 1243 # construct the link map
1240 1244 linkmap = {}
1241 1245 for n in nodes:
1242 1246 linkmap[self.changelog.rev(n)] = n
1243 1247
1244 1248 # construct a list of all changed files
1245 1249 changed = {}
1246 1250 for n in nodes:
1247 1251 c = self.changelog.read(n)
1248 1252 for f in c[3]:
1249 1253 changed[f] = 1
1250 1254 changed = changed.keys()
1251 1255 changed.sort()
1252 1256
1253 1257 # the changegroup is changesets + manifests + all file revs
1254 1258 revs = [ self.changelog.rev(n) for n in nodes ]
1255 1259
1256 1260 for y in self.changelog.group(linkmap): yield y
1257 1261 for y in self.manifest.group(linkmap): yield y
1258 1262 for f in changed:
1259 1263 yield struct.pack(">l", len(f) + 4) + f
1260 1264 g = self.file(f).group(linkmap)
1261 1265 for y in g:
1262 1266 yield y
1263 1267
1264 1268 yield struct.pack(">l", 0)
1265 1269
1266 1270 return genread(gengroup())
1267 1271
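A sketch of the chunk framing shared by changegroup() above and addchangegroup() below: every chunk is a 4-byte big-endian length (which counts the 4 length bytes themselves) followed by the payload, and a bare zero length terminates a group. The sample payloads are made up.

import struct
from StringIO import StringIO

def writechunks(chunks):
    out = [struct.pack(">l", len(c) + 4) + c for c in chunks]
    out.append(struct.pack(">l", 0))       # end-of-group marker
    return "".join(out)

def readchunks(stream):
    while 1:
        d = stream.read(4)
        if not d:
            break
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            break                           # zero-length chunk: group done
        yield stream.read(l - 4)

stream = StringIO(writechunks(["first chunk", "second chunk"]))
print list(readchunks(stream))              # ['first chunk', 'second chunk']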
1268 1272 def addchangegroup(self, source):
1269 1273
1270 1274 def getchunk():
1271 1275 d = source.read(4)
1272 1276 if not d: return ""
1273 1277 l = struct.unpack(">l", d)[0]
1274 1278 if l <= 4: return ""
1275 1279 return source.read(l - 4)
1276 1280
1277 1281 def getgroup():
1278 1282 while 1:
1279 1283 c = getchunk()
1280 1284 if not c: break
1281 1285 yield c
1282 1286
1283 1287 def csmap(x):
1284 1288 self.ui.debug("add changeset %s\n" % short(x))
1285 1289 return self.changelog.count()
1286 1290
1287 1291 def revmap(x):
1288 1292 return self.changelog.rev(x)
1289 1293
1290 1294 if not source: return
1291 1295 changesets = files = revisions = 0
1292 1296
1293 1297 tr = self.transaction()
1294 1298
1295 1299 # pull off the changeset group
1296 1300 self.ui.status("adding changesets\n")
1297 1301 co = self.changelog.tip()
1298 1302 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1299 1303 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1300 1304
1301 1305 # pull off the manifest group
1302 1306 self.ui.status("adding manifests\n")
1303 1307 mm = self.manifest.tip()
1304 1308 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1305 1309
1306 1310 # process the files
1307 1311 self.ui.status("adding file changes\n")
1308 1312 while 1:
1309 1313 f = getchunk()
1310 1314 if not f: break
1311 1315 self.ui.debug("adding %s revisions\n" % f)
1312 1316 fl = self.file(f)
1313 1317 o = fl.count()
1314 1318 n = fl.addgroup(getgroup(), revmap, tr)
1315 1319 revisions += fl.count() - o
1316 1320 files += 1
1317 1321
1318 1322 self.ui.status(("added %d changesets" +
1319 1323 " with %d changes to %d files\n")
1320 1324 % (changesets, revisions, files))
1321 1325
1322 1326 tr.close()
1323 1327
1324 1328 if not self.hook("changegroup"):
1325 1329 return 1
1326 1330
1327 1331 return
1328 1332
1329 1333 def update(self, node, allow=False, force=False, choose=None,
1330 1334 moddirstate=True):
1331 1335 pl = self.dirstate.parents()
1332 1336 if not force and pl[1] != nullid:
1333 1337 self.ui.warn("aborting: outstanding uncommitted merges\n")
1334 1338 return 1
1335 1339
1336 1340 p1, p2 = pl[0], node
1337 1341 pa = self.changelog.ancestor(p1, p2)
1338 1342 m1n = self.changelog.read(p1)[0]
1339 1343 m2n = self.changelog.read(p2)[0]
1340 1344 man = self.manifest.ancestor(m1n, m2n)
1341 1345 m1 = self.manifest.read(m1n)
1342 1346 mf1 = self.manifest.readflags(m1n)
1343 1347 m2 = self.manifest.read(m2n)
1344 1348 mf2 = self.manifest.readflags(m2n)
1345 1349 ma = self.manifest.read(man)
1346 1350 mfa = self.manifest.readflags(man)
1347 1351
1348 1352 (c, a, d, u) = self.changes()
1349 1353
1350 1354 # is this a jump, or a merge? i.e. is there a linear path
1351 1355 # from p1 to p2?
1352 1356 linear_path = (pa == p1 or pa == p2)
1353 1357
1354 1358 # resolve the manifest to determine which files
1355 1359 # we care about merging
1356 1360 self.ui.note("resolving manifests\n")
1357 1361 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1358 1362 (force, allow, moddirstate, linear_path))
1359 1363 self.ui.debug(" ancestor %s local %s remote %s\n" %
1360 1364 (short(man), short(m1n), short(m2n)))
1361 1365
1362 1366 merge = {}
1363 1367 get = {}
1364 1368 remove = []
1365 1369 mark = {}
1366 1370
1367 1371 # construct a working dir manifest
1368 1372 mw = m1.copy()
1369 1373 mfw = mf1.copy()
1370 1374 umap = dict.fromkeys(u)
1371 1375
1372 1376 for f in a + c + u:
1373 1377 mw[f] = ""
1374 1378 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1375 1379
1376 1380 for f in d:
1377 1381 if f in mw: del mw[f]
1378 1382
1379 1383 # If we're jumping between revisions (as opposed to merging),
1380 1384 # and if neither the working directory nor the target rev has
1381 1385 # the file, then we need to remove it from the dirstate, to
1382 1386 # prevent the dirstate from listing the file when it is no
1383 1387 # longer in the manifest.
1384 1388 if moddirstate and linear_path and f not in m2:
1385 1389 self.dirstate.forget((f,))
1386 1390
1387 1391 # Compare manifests
1388 1392 for f, n in mw.iteritems():
1389 1393 if choose and not choose(f): continue
1390 1394 if f in m2:
1391 1395 s = 0
1392 1396
1393 1397 # is the wfile new since m1, and match m2?
1394 1398 if f not in m1:
1395 1399 t1 = self.wfile(f).read()
1396 1400 t2 = self.file(f).revision(m2[f])
1397 1401 if cmp(t1, t2) == 0:
1398 1402 mark[f] = 1
1399 1403 n = m2[f]
1400 1404 del t1, t2
1401 1405
1402 1406 # are files different?
1403 1407 if n != m2[f]:
1404 1408 a = ma.get(f, nullid)
1405 1409 # are both different from the ancestor?
1406 1410 if n != a and m2[f] != a:
1407 1411 self.ui.debug(" %s versions differ, resolve\n" % f)
1408 1412 # merge executable bits
1409 1413 # "if we changed or they changed, change in merge"
1410 1414 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1411 1415 mode = ((a^b) | (a^c)) ^ a
1412 1416 merge[f] = (m1.get(f, nullid), m2[f], mode)
1413 1417 s = 1
1414 1418 # are we clobbering?
1415 1419 # is remote's version newer?
1416 1420 # or are we going back in time?
1417 1421 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1418 1422 self.ui.debug(" remote %s is newer, get\n" % f)
1419 1423 get[f] = m2[f]
1420 1424 s = 1
1421 1425 else:
1422 1426 mark[f] = 1
1423 1427 elif f in umap:
1424 1428 # this unknown file is the same as the checkout
1425 1429 get[f] = m2[f]
1426 1430
1427 1431 if not s and mfw[f] != mf2[f]:
1428 1432 if force:
1429 1433 self.ui.debug(" updating permissions for %s\n" % f)
1430 1434 util.set_exec(self.wjoin(f), mf2[f])
1431 1435 else:
1432 1436 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1433 1437 mode = ((a^b) | (a^c)) ^ a
1434 1438 if mode != b:
1435 1439 self.ui.debug(" updating permissions for %s\n" % f)
1436 1440 util.set_exec(self.wjoin(f), mode)
1437 1441 mark[f] = 1
1438 1442 del m2[f]
1439 1443 elif f in ma:
1440 1444 if n != ma[f]:
1441 1445 r = "d"
1442 1446 if not force and (linear_path or allow):
1443 1447 r = self.ui.prompt(
1444 1448 (" local changed %s which remote deleted\n" % f) +
1445 1449 "(k)eep or (d)elete?", "[kd]", "k")
1446 1450 if r == "d":
1447 1451 remove.append(f)
1448 1452 else:
1449 1453 self.ui.debug("other deleted %s\n" % f)
1450 1454 remove.append(f) # other deleted it
1451 1455 else:
1452 1456 if n == m1.get(f, nullid): # same as parent
1453 1457 if p2 == pa: # going backwards?
1454 1458 self.ui.debug("remote deleted %s\n" % f)
1455 1459 remove.append(f)
1456 1460 else:
1457 1461 self.ui.debug("local created %s, keeping\n" % f)
1458 1462 else:
1459 1463 self.ui.debug("working dir created %s, keeping\n" % f)
1460 1464
1461 1465 for f, n in m2.iteritems():
1462 1466 if choose and not choose(f): continue
1463 1467 if f[0] == "/": continue
1464 1468 if f in ma and n != ma[f]:
1465 1469 r = "k"
1466 1470 if not force and (linear_path or allow):
1467 1471 r = self.ui.prompt(
1468 1472 ("remote changed %s which local deleted\n" % f) +
1469 1473 "(k)eep or (d)elete?", "[kd]", "k")
1470 1474 if r == "k": get[f] = n
1471 1475 elif f not in ma:
1472 1476 self.ui.debug("remote created %s\n" % f)
1473 1477 get[f] = n
1474 1478 else:
1475 1479 if force or p2 == pa: # going backwards?
1476 1480 self.ui.debug("local deleted %s, recreating\n" % f)
1477 1481 get[f] = n
1478 1482 else:
1479 1483 self.ui.debug("local deleted %s\n" % f)
1480 1484
1481 1485 del mw, m1, m2, ma
1482 1486
1483 1487 if force:
1484 1488 for f in merge:
1485 1489 get[f] = merge[f][1]
1486 1490 merge = {}
1487 1491
1488 1492 if linear_path or force:
1489 1493 # we don't need to do any magic, just jump to the new rev
1490 1494 mode = 'n'
1491 1495 p1, p2 = p2, nullid
1492 1496 else:
1493 1497 if not allow:
1494 1498 self.ui.status("this update spans a branch" +
1495 1499 " affecting the following files:\n")
1496 1500 fl = merge.keys() + get.keys()
1497 1501 fl.sort()
1498 1502 for f in fl:
1499 1503 cf = ""
1500 1504 if f in merge: cf = " (resolve)"
1501 1505 self.ui.status(" %s%s\n" % (f, cf))
1502 1506 self.ui.warn("aborting update spanning branches!\n")
1503 1507 self.ui.status("(use update -m to merge across branches" +
1504 1508 " or -C to lose changes)\n")
1505 1509 return 1
1506 1510 # we have to remember what files we needed to get/change
1507 1511 # because any file that's different from either one of its
1508 1512 # parents must be in the changeset
1509 1513 mode = 'm'
1510 1514 if moddirstate:
1511 1515 self.dirstate.update(mark.keys(), "m")
1512 1516
1513 1517 if moddirstate:
1514 1518 self.dirstate.setparents(p1, p2)
1515 1519
1516 1520 # get the files we don't need to change
1517 1521 files = get.keys()
1518 1522 files.sort()
1519 1523 for f in files:
1520 1524 if f[0] == "/": continue
1521 1525 self.ui.note("getting %s\n" % f)
1522 1526 t = self.file(f).read(get[f])
1523 1527 try:
1524 1528 self.wfile(f, "w").write(t)
1525 1529 except IOError:
1526 1530 os.makedirs(os.path.dirname(self.wjoin(f)))
1527 1531 self.wfile(f, "w").write(t)
1528 1532 util.set_exec(self.wjoin(f), mf2[f])
1529 1533 if moddirstate:
1530 1534 self.dirstate.update([f], mode)
1531 1535
1532 1536 # merge the tricky bits
1533 1537 files = merge.keys()
1534 1538 files.sort()
1535 1539 for f in files:
1536 1540 self.ui.status("merging %s\n" % f)
1537 1541 m, o, flag = merge[f]
1538 1542 self.merge3(f, m, o)
1539 1543 util.set_exec(self.wjoin(f), flag)
1540 1544 if moddirstate and mode == 'm':
1541 1545 # only update dirstate on branch merge, otherwise we
1542 1546 # could mark files with changes as unchanged
1543 1547 self.dirstate.update([f], mode)
1544 1548
1545 1549 remove.sort()
1546 1550 for f in remove:
1547 1551 self.ui.note("removing %s\n" % f)
1548 1552 try:
1549 1553 os.unlink(f)
1550 1554 except OSError, inst:
1551 1555 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1552 1556 # try removing directories that might now be empty
1553 1557 try: os.removedirs(os.path.dirname(f))
1554 1558 except: pass
1555 1559 if moddirstate:
1556 1560 if mode == 'n':
1557 1561 self.dirstate.forget(remove)
1558 1562 else:
1559 1563 self.dirstate.update(remove, 'r')
1560 1564
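The executable-bit merge used twice in update() above implements "if we changed it or they changed it, the merged result changes it": mode = ((a ^ b) | (a ^ c)) ^ a, where a is the ancestor's bit, b the working directory's and c the remote's. A quick exhaustive check of the formula:

for a in (0, 1):
    for b in (0, 1):
        for c in (0, 1):
            mode = ((a ^ b) | (a ^ c)) ^ a
            if b != a:
                expected = b       # we changed it, our value wins
            else:
                expected = c       # otherwise take the other side's value
            assert mode == expected
            print a, b, c, "->", mode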
1561 1565 def merge3(self, fn, my, other):
1562 1566 """perform a 3-way merge in the working directory"""
1563 1567
1564 1568 def temp(prefix, node):
1565 1569 pre = "%s~%s." % (os.path.basename(fn), prefix)
1566 1570 (fd, name) = tempfile.mkstemp("", pre)
1567 1571 f = os.fdopen(fd, "wb")
1568 1572 f.write(fl.revision(node))
1569 1573 f.close()
1570 1574 return name
1571 1575
1572 1576 fl = self.file(fn)
1573 1577 base = fl.ancestor(my, other)
1574 1578 a = self.wjoin(fn)
1575 1579 b = temp("base", base)
1576 1580 c = temp("other", other)
1577 1581
1578 1582 self.ui.note("resolving %s\n" % fn)
1579 1583 self.ui.debug("file %s: other %s ancestor %s\n" %
1580 1584 (fn, short(other), short(base)))
1581 1585
1582 1586 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1583 1587 or "hgmerge")
1584 1588 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1585 1589 if r:
1586 1590 self.ui.warn("merging %s failed!\n" % fn)
1587 1591
1588 1592 os.unlink(b)
1589 1593 os.unlink(c)
1590 1594
1591 1595 def verify(self):
1592 1596 filelinkrevs = {}
1593 1597 filenodes = {}
1594 1598 changesets = revisions = files = 0
1595 1599 errors = 0
1596 1600
1597 1601 seen = {}
1598 1602 self.ui.status("checking changesets\n")
1599 1603 for i in range(self.changelog.count()):
1600 1604 changesets += 1
1601 1605 n = self.changelog.node(i)
1602 1606 if n in seen:
1603 1607 self.ui.warn("duplicate changeset at revision %d\n" % i)
1604 1608 errors += 1
1605 1609 seen[n] = 1
1606 1610
1607 1611 for p in self.changelog.parents(n):
1608 1612 if p not in self.changelog.nodemap:
1609 1613 self.ui.warn("changeset %s has unknown parent %s\n" %
1610 1614 (short(n), short(p)))
1611 1615 errors += 1
1612 1616 try:
1613 1617 changes = self.changelog.read(n)
1614 1618 except Exception, inst:
1615 1619 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1616 1620 errors += 1
1617 1621
1618 1622 for f in changes[3]:
1619 1623 filelinkrevs.setdefault(f, []).append(i)
1620 1624
1621 1625 seen = {}
1622 1626 self.ui.status("checking manifests\n")
1623 1627 for i in range(self.manifest.count()):
1624 1628 n = self.manifest.node(i)
1625 1629 if n in seen:
1626 1630 self.ui.warn("duplicate manifest at revision %d\n" % i)
1627 1631 errors += 1
1628 1632 seen[n] = 1
1629 1633
1630 1634 for p in self.manifest.parents(n):
1631 1635 if p not in self.manifest.nodemap:
1632 1636 self.ui.warn("manifest %s has unknown parent %s\n" %
1633 1637 (short(n), short(p)))
1634 1638 errors += 1
1635 1639
1636 1640 try:
1637 1641 delta = mdiff.patchtext(self.manifest.delta(n))
1638 1642 except KeyboardInterrupt:
1639 1643 self.ui.warn("aborted")
1640 1644 sys.exit(0)
1641 1645 except Exception, inst:
1642 1646 self.ui.warn("unpacking manifest %s: %s\n"
1643 1647 % (short(n), inst))
1644 1648 errors += 1
1645 1649
1646 1650 ff = [ l.split('\0') for l in delta.splitlines() ]
1647 1651 for f, fn in ff:
1648 1652 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1649 1653
1650 1654 self.ui.status("crosschecking files in changesets and manifests\n")
1651 1655 for f in filenodes:
1652 1656 if f not in filelinkrevs:
1653 1657 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1654 1658 errors += 1
1655 1659
1656 1660 for f in filelinkrevs:
1657 1661 if f not in filenodes:
1658 1662 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1659 1663 errors += 1
1660 1664
1661 1665 self.ui.status("checking files\n")
1662 1666 ff = filenodes.keys()
1663 1667 ff.sort()
1664 1668 for f in ff:
1665 1669 if f == "/dev/null": continue
1666 1670 files += 1
1667 1671 fl = self.file(f)
1668 1672 nodes = { nullid: 1 }
1669 1673 seen = {}
1670 1674 for i in range(fl.count()):
1671 1675 revisions += 1
1672 1676 n = fl.node(i)
1673 1677
1674 1678 if n in seen:
1675 1679 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1676 1680 errors += 1
1677 1681
1678 1682 if n not in filenodes[f]:
1679 1683 self.ui.warn("%s: %d:%s not in manifests\n"
1680 1684 % (f, i, short(n)))
1681 1685 errors += 1
1682 1686 else:
1683 1687 del filenodes[f][n]
1684 1688
1685 1689 flr = fl.linkrev(n)
1686 1690 if flr not in filelinkrevs[f]:
1687 1691 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1688 1692 % (f, short(n), fl.linkrev(n)))
1689 1693 errors += 1
1690 1694 else:
1691 1695 filelinkrevs[f].remove(flr)
1692 1696
1693 1697 # verify contents
1694 1698 try:
1695 1699 t = fl.read(n)
1696 1700 except Exception, inst:
1697 1701 self.ui.warn("unpacking file %s %s: %s\n"
1698 1702 % (f, short(n), inst))
1699 1703 errors += 1
1700 1704
1701 1705 # verify parents
1702 1706 (p1, p2) = fl.parents(n)
1703 1707 if p1 not in nodes:
1704 1708 self.ui.warn("file %s:%s unknown parent 1 %s" %
1705 1709 (f, short(n), short(p1)))
1706 1710 errors += 1
1707 1711 if p2 not in nodes:
1708 1712 self.ui.warn("file %s:%s unknown parent 2 %s" %
1709 1713 (f, short(n), short(p2)))
1710 1714 errors += 1
1711 1715 nodes[n] = 1
1712 1716
1713 1717 # cross-check
1714 1718 for node in filenodes[f]:
1715 1719 self.ui.warn("node %s in manifests not in %s\n"
1716 1720 % (hex(node), f))
1717 1721 errors += 1
1718 1722
1719 1723 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1720 1724 (files, changesets, revisions))
1721 1725
1722 1726 if errors:
1723 1727 self.ui.warn("%d integrity errors encountered!\n" % errors)
1724 1728 return 1
1725 1729
1726 1730 class httprepository:
1727 1731 def __init__(self, ui, path):
1728 1732 # fix missing / after hostname
1729 1733 s = urlparse.urlsplit(path)
1730 1734 partial = s[2]
1731 1735 if not partial: partial = "/"
1732 1736 self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
1733 1737 self.ui = ui
1734 1738 no_list = [ "localhost", "127.0.0.1" ]
1735 1739 host = ui.config("http_proxy", "host")
1736 1740 if host is None:
1737 1741 host = os.environ.get("http_proxy")
1738 1742 if host and host.startswith('http://'):
1739 1743 host = host[7:]
1740 1744 user = ui.config("http_proxy", "user")
1741 1745 passwd = ui.config("http_proxy", "passwd")
1742 1746 no = ui.config("http_proxy", "no")
1743 1747 if no is None:
1744 1748 no = os.environ.get("no_proxy")
1745 1749 if no:
1746 1750 no_list = no_list + no.split(",")
1747 1751
1748 1752 no_proxy = 0
1749 1753 for h in no_list:
1750 1754 if (path.startswith("http://" + h + "/") or
1751 1755 path.startswith("http://" + h + ":") or
1752 1756 path == "http://" + h):
1753 1757 no_proxy = 1
1754 1758
1755 1759 # Note: urllib2 takes proxy values from the environment and those will
1756 1760 # take precedence
1757 1761 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1758 1762 if os.environ.has_key(env):
1759 1763 del os.environ[env]
1760 1764
1761 1765 proxy_handler = urllib2.BaseHandler()
1762 1766 if host and not no_proxy:
1763 1767 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1764 1768
1765 1769 authinfo = None
1766 1770 if user and passwd:
1767 1771 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1768 1772 passmgr.add_password(None, host, user, passwd)
1769 1773 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1770 1774
1771 1775 opener = urllib2.build_opener(proxy_handler, authinfo)
1772 1776 urllib2.install_opener(opener)
1773 1777
1774 1778 def dev(self):
1775 1779 return -1
1776 1780
1777 1781 def do_cmd(self, cmd, **args):
1778 1782 self.ui.debug("sending %s command\n" % cmd)
1779 1783 q = {"cmd": cmd}
1780 1784 q.update(args)
1781 1785 qs = urllib.urlencode(q)
1782 1786 cu = "%s?%s" % (self.url, qs)
1783 1787 resp = urllib2.urlopen(cu)
1784 1788 proto = resp.headers['content-type']
1785 1789
1786 1790 # accept old "text/plain" and "application/hg-changegroup" for now
1787 1791 if not proto.startswith('application/mercurial') and \
1788 1792 not proto.startswith('text/plain') and \
1789 1793 not proto.startswith('application/hg-changegroup'):
1790 1794 raise RepoError("'%s' does not appear to be an hg repository"
1791 1795 % self.url)
1792 1796
1793 1797 if proto.startswith('application/mercurial'):
1794 1798 version = proto[22:]
1795 1799 if float(version) > 0.1:
1796 1800 raise RepoError("'%s' uses newer protocol %s" %
1797 1801 (self.url, version))
1798 1802
1799 1803 return resp
1800 1804
1801 1805 def heads(self):
1802 1806 d = self.do_cmd("heads").read()
1803 1807 try:
1804 1808 return map(bin, d[:-1].split(" "))
1805 1809 except:
1806 1810 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1807 1811 raise
1808 1812
1809 1813 def branches(self, nodes):
1810 1814 n = " ".join(map(hex, nodes))
1811 1815 d = self.do_cmd("branches", nodes=n).read()
1812 1816 try:
1813 1817 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1814 1818 return br
1815 1819 except:
1816 1820 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1817 1821 raise
1818 1822
1819 1823 def between(self, pairs):
1820 1824 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1821 1825 d = self.do_cmd("between", pairs=n).read()
1822 1826 try:
1823 1827 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1824 1828 return p
1825 1829 except:
1826 1830 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1827 1831 raise
1828 1832
1829 1833 def changegroup(self, nodes):
1830 1834 n = " ".join(map(hex, nodes))
1831 1835 f = self.do_cmd("changegroup", roots=n)
1832 1836 bytes = 0
1833 1837
1834 1838 class zread:
1835 1839 def __init__(self, f):
1836 1840 self.zd = zlib.decompressobj()
1837 1841 self.f = f
1838 1842 self.buf = ""
1839 1843 def read(self, l):
1840 1844 while l > len(self.buf):
1841 1845 r = self.f.read(4096)
1842 1846 if r:
1843 1847 self.buf += self.zd.decompress(r)
1844 1848 else:
1845 1849 self.buf += self.zd.flush()
1846 1850 break
1847 1851 d, self.buf = self.buf[:l], self.buf[l:]
1848 1852 return d
1849 1853
1850 1854 return zread(f)
1851 1855
1852 1856 class remotelock:
1853 1857 def __init__(self, repo):
1854 1858 self.repo = repo
1855 1859 def release(self):
1856 1860 self.repo.unlock()
1857 1861 self.repo = None
1858 1862 def __del__(self):
1859 1863 if self.repo:
1860 1864 self.release()
1861 1865
1862 1866 class sshrepository:
1863 1867 def __init__(self, ui, path):
1864 1868 self.url = path
1865 1869 self.ui = ui
1866 1870
1867 1871 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
1868 1872 if not m:
1869 1873 raise RepoError("couldn't parse destination %s\n" % path)
1870 1874
1871 1875 self.user = m.group(2)
1872 1876 self.host = m.group(3)
1873 1877 self.port = m.group(5)
1874 1878 self.path = m.group(7)
1875 1879
1876 1880 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
1877 1881 args = self.port and ("%s -p %s") % (args, self.port) or args
1878 1882 path = self.path or ""
1879 1883
1880 1884 cmd = "ssh %s 'hg -R %s serve --stdio'"
1881 1885 cmd = cmd % (args, path)
1882 1886
1883 1887 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
1884 1888
1885 1889 def readerr(self):
1886 1890 while 1:
1887 1891 r,w,x = select.select([self.pipee], [], [], 0)
1888 1892 if not r: break
1889 1893 l = self.pipee.readline()
1890 1894 if not l: break
1891 1895 self.ui.status("remote: ", l)
1892 1896
1893 1897 def __del__(self):
1894 1898 self.pipeo.close()
1895 1899 self.pipei.close()
1896 1900 for l in self.pipee:
1897 1901 self.ui.status("remote: ", l)
1898 1902 self.pipee.close()
1899 1903
1900 1904 def dev(self):
1901 1905 return -1
1902 1906
1903 1907 def do_cmd(self, cmd, **args):
1904 1908 self.ui.debug("sending %s command\n" % cmd)
1905 1909 self.pipeo.write("%s\n" % cmd)
1906 1910 for k, v in args.items():
1907 1911 self.pipeo.write("%s %d\n" % (k, len(v)))
1908 1912 self.pipeo.write(v)
1909 1913 self.pipeo.flush()
1910 1914
1911 1915 return self.pipei
1912 1916
1913 1917 def call(self, cmd, **args):
1914 1918 r = self.do_cmd(cmd, **args)
1915 1919 l = r.readline()
1916 1920 self.readerr()
1917 1921 try:
1918 1922 l = int(l)
1919 1923 except:
1920 1924 raise RepoError("unexpected response '%s'" % l)
1921 1925 return r.read(l)
1922 1926
1923 1927 def lock(self):
1924 1928 self.call("lock")
1925 1929 return remotelock(self)
1926 1930
1927 1931 def unlock(self):
1928 1932 self.call("unlock")
1929 1933
1930 1934 def heads(self):
1931 1935 d = self.call("heads")
1932 1936 try:
1933 1937 return map(bin, d[:-1].split(" "))
1934 1938 except:
1935 1939 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1936 1940
1937 1941 def branches(self, nodes):
1938 1942 n = " ".join(map(hex, nodes))
1939 1943 d = self.call("branches", nodes=n)
1940 1944 try:
1941 1945 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1942 1946 return br
1943 1947 except:
1944 1948 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1945 1949
1946 1950 def between(self, pairs):
1947 1951 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1948 1952 d = self.call("between", pairs=n)
1949 1953 try:
1950 1954 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1951 1955 return p
1952 1956 except:
1953 1957 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1954 1958
1955 1959 def changegroup(self, nodes):
1956 1960 n = " ".join(map(hex, nodes))
1957 1961 f = self.do_cmd("changegroup", roots=n)
1958 1962 return self.pipei
1959 1963
1960 1964 def addchangegroup(self, cg):
1961 1965 d = self.call("addchangegroup")
1962 1966 if d:
1963 1967 raise RepoError("push refused: %s" % d)
1964 1968
1965 1969 while 1:
1966 1970 d = cg.read(4096)
1967 1971 if not d: break
1968 1972 self.pipeo.write(d)
1969 1973 self.readerr()
1970 1974
1971 1975 self.pipeo.flush()
1972 1976
1973 1977 self.readerr()
1974 1978 l = int(self.pipei.readline())
1975 1979 return self.pipei.read(l) != ""
1976 1980
1977 1981 def repository(ui, path=None, create=0):
1978 1982 if path:
1979 1983 if path.startswith("http://"):
1980 1984 return httprepository(ui, path)
1981 1985 if path.startswith("hg://"):
1982 1986 return httprepository(ui, path.replace("hg://", "http://"))
1983 1987 if path.startswith("old-http://"):
1984 1988 return localrepository(ui, path.replace("old-http://", "http://"))
1985 1989 if path.startswith("ssh://"):
1986 1990 return sshrepository(ui, path)
1987 1991
1988 1992 return localrepository(ui, path, create)