Fix the directory and revlog collision problem...
mpm@selenic.com
r786:902b12d5 default
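The change below adds a reversible path encoding so that a tracked file named foo can coexist under .hg/data with a tracked directory named foo.i, foo.d or .hg, whose contents would otherwise collide with foo's revlogs. Here is a minimal standalone sketch of that scheme, following the filelog.encodedir/decodedir helpers in the diff; the module-level functions and the sample paths are illustrative only, not part of the commit.

    # A tracked file "foo" stores its history in "data/foo.i" and "data/foo.d",
    # so a tracked directory literally named "foo.i", "foo.d" or ".hg" would
    # collide with those revlogs. The commit escapes such directory components
    # (note the trailing "/") by appending ".hg"; decodedir undoes it.

    def encodedir(path):
        return (path.replace(".hg/", ".hg.hg/")
                    .replace(".i/", ".i.hg/")
                    .replace(".d/", ".d.hg/"))

    def decodedir(path):
        return (path.replace(".d.hg/", ".d/")
                    .replace(".i.hg/", ".i/")
                    .replace(".hg.hg/", ".hg/"))

    # hypothetical paths, round-trip check
    assert encodedir("foo.i/bar") == "foo.i.hg/bar"
    assert decodedir(encodedir("a.hg/b.d/c")) == "a.hg/b.d/c"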
@@ -1,1965 +1,1979 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 14 demandload(globals(), "bisect select")
15 15
16 16 class filelog(revlog):
17 17 def __init__(self, opener, path):
18 18 revlog.__init__(self, opener,
19 os.path.join("data", path + ".i"),
20 os.path.join("data", path + ".d"))
19 os.path.join("data", self.encodedir(path + ".i")),
20 os.path.join("data", self.encodedir(path + ".d")))
21
22 # This avoids a collision between a file named foo and a dir named
23 # foo.i or foo.d
24 def encodedir(self, path):
 25 path = path.replace(".hg/", ".hg.hg/")
 26 path = path.replace(".i/", ".i.hg/")
 27 path = path.replace(".d/", ".d.hg/")
28 return path
29
30 def decodedir(self, path):
 31 path = path.replace(".d.hg/", ".d/")
 32 path = path.replace(".i.hg/", ".i/")
 33 path = path.replace(".hg.hg/", ".hg/")
34 return path
21 35
22 36 def read(self, node):
23 37 t = self.revision(node)
24 38 if not t.startswith('\1\n'):
25 39 return t
26 40 s = t.find('\1\n', 2)
27 41 return t[s+2:]
28 42
29 43 def readmeta(self, node):
30 44 t = self.revision(node)
31 45 if not t.startswith('\1\n'):
32 46 return t
33 47 s = t.find('\1\n', 2)
 34 48 mt = t[2:s]
 m = {}
35 49 for l in mt.splitlines():
36 50 k, v = l.split(": ", 1)
37 51 m[k] = v
38 52 return m
39 53
40 54 def add(self, text, meta, transaction, link, p1=None, p2=None):
41 55 if meta or text.startswith('\1\n'):
42 56 mt = ""
43 57 if meta:
44 58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
45 59 text = "\1\n" + "".join(mt) + "\1\n" + text
46 60 return self.addrevision(text, transaction, link, p1, p2)
47 61
48 62 def annotate(self, node):
49 63
50 64 def decorate(text, rev):
51 65 return ([rev] * len(text.splitlines()), text)
52 66
53 67 def pair(parent, child):
54 68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
55 69 child[0][b1:b2] = parent[0][a1:a2]
56 70 return child
57 71
58 72 # find all ancestors
59 73 needed = {node:1}
60 74 visit = [node]
61 75 while visit:
62 76 n = visit.pop(0)
63 77 for p in self.parents(n):
64 78 if p not in needed:
65 79 needed[p] = 1
66 80 visit.append(p)
67 81 else:
68 82 # count how many times we'll use this
69 83 needed[p] += 1
70 84
71 85 # sort by revision which is a topological order
72 86 visit = [ (self.rev(n), n) for n in needed.keys() ]
73 87 visit.sort()
74 88 hist = {}
75 89
76 90 for r,n in visit:
77 91 curr = decorate(self.read(n), self.linkrev(n))
78 92 for p in self.parents(n):
79 93 if p != nullid:
80 94 curr = pair(hist[p], curr)
81 95 # trim the history of unneeded revs
82 96 needed[p] -= 1
83 97 if not needed[p]:
84 98 del hist[p]
85 99 hist[n] = curr
86 100
87 101 return zip(hist[n][0], hist[n][1].splitlines(1))
88 102
89 103 class manifest(revlog):
90 104 def __init__(self, opener):
91 105 self.mapcache = None
92 106 self.listcache = None
93 107 self.addlist = None
94 108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
95 109
96 110 def read(self, node):
97 111 if node == nullid: return {} # don't upset local cache
98 112 if self.mapcache and self.mapcache[0] == node:
99 113 return self.mapcache[1]
100 114 text = self.revision(node)
101 115 map = {}
102 116 flag = {}
103 117 self.listcache = (text, text.splitlines(1))
104 118 for l in self.listcache[1]:
105 119 (f, n) = l.split('\0')
106 120 map[f] = bin(n[:40])
107 121 flag[f] = (n[40:-1] == "x")
108 122 self.mapcache = (node, map, flag)
109 123 return map
110 124
111 125 def readflags(self, node):
112 126 if node == nullid: return {} # don't upset local cache
113 127 if not self.mapcache or self.mapcache[0] != node:
114 128 self.read(node)
115 129 return self.mapcache[2]
116 130
117 131 def diff(self, a, b):
118 132 # this is sneaky, as we're not actually using a and b
119 133 if self.listcache and self.addlist and self.listcache[0] == a:
120 134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
121 135 if mdiff.patch(a, d) != b:
122 136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
123 137 return mdiff.textdiff(a, b)
124 138 return d
125 139 else:
126 140 return mdiff.textdiff(a, b)
127 141
128 142 def add(self, map, flags, transaction, link, p1=None, p2=None,
129 143 changed=None):
130 144 # directly generate the mdiff delta from the data collected during
131 145 # the bisect loop below
132 146 def gendelta(delta):
133 147 i = 0
134 148 result = []
135 149 while i < len(delta):
136 150 start = delta[i][2]
137 151 end = delta[i][3]
138 152 l = delta[i][4]
139 153 if l == None:
140 154 l = ""
141 155 while i < len(delta) - 1 and start <= delta[i+1][2] \
142 156 and end >= delta[i+1][2]:
143 157 if delta[i+1][3] > end:
144 158 end = delta[i+1][3]
145 159 if delta[i+1][4]:
146 160 l += delta[i+1][4]
147 161 i += 1
148 162 result.append(struct.pack(">lll", start, end, len(l)) + l)
149 163 i += 1
150 164 return result
151 165
152 166 # apply the changes collected during the bisect loop to our addlist
153 167 def addlistdelta(addlist, delta):
154 168 # apply the deltas to the addlist. start from the bottom up
155 169 # so changes to the offsets don't mess things up.
156 170 i = len(delta)
157 171 while i > 0:
158 172 i -= 1
159 173 start = delta[i][0]
160 174 end = delta[i][1]
161 175 if delta[i][4]:
162 176 addlist[start:end] = [delta[i][4]]
163 177 else:
164 178 del addlist[start:end]
165 179 return addlist
166 180
167 181 # calculate the byte offset of the start of each line in the
168 182 # manifest
169 183 def calcoffsets(addlist):
170 184 offsets = [0] * (len(addlist) + 1)
171 185 offset = 0
172 186 i = 0
173 187 while i < len(addlist):
174 188 offsets[i] = offset
175 189 offset += len(addlist[i])
176 190 i += 1
177 191 offsets[i] = offset
178 192 return offsets
179 193
180 194 # if we're using the listcache, make sure it is valid and
181 195 # parented by the same node we're diffing against
182 196 if not changed or not self.listcache or not p1 or \
183 197 self.mapcache[0] != p1:
184 198 files = map.keys()
185 199 files.sort()
186 200
187 201 self.addlist = ["%s\000%s%s\n" %
188 202 (f, hex(map[f]), flags[f] and "x" or '')
189 203 for f in files]
190 204 cachedelta = None
191 205 else:
192 206 addlist = self.listcache[1]
193 207
194 208 # find the starting offset for each line in the add list
195 209 offsets = calcoffsets(addlist)
196 210
197 211 # combine the changed lists into one list for sorting
198 212 work = [[x, 0] for x in changed[0]]
199 213 work[len(work):] = [[x, 1] for x in changed[1]]
200 214 work.sort()
201 215
202 216 delta = []
203 217 bs = 0
204 218
205 219 for w in work:
206 220 f = w[0]
207 221 # bs will either be the index of the item or the insert point
208 222 bs = bisect.bisect(addlist, f, bs)
209 223 if bs < len(addlist):
210 224 fn = addlist[bs][:addlist[bs].index('\0')]
211 225 else:
212 226 fn = None
213 227 if w[1] == 0:
214 228 l = "%s\000%s%s\n" % (f, hex(map[f]),
215 229 flags[f] and "x" or '')
216 230 else:
217 231 l = None
218 232 start = bs
219 233 if fn != f:
220 234 # item not found, insert a new one
221 235 end = bs
222 236 if w[1] == 1:
223 237 sys.stderr.write("failed to remove %s from manifest\n"
224 238 % f)
225 239 sys.exit(1)
226 240 else:
227 241 # item is found, replace/delete the existing line
228 242 end = bs + 1
229 243 delta.append([start, end, offsets[start], offsets[end], l])
230 244
231 245 self.addlist = addlistdelta(addlist, delta)
232 246 if self.mapcache[0] == self.tip():
233 247 cachedelta = "".join(gendelta(delta))
234 248 else:
235 249 cachedelta = None
236 250
237 251 text = "".join(self.addlist)
238 252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
239 253 sys.stderr.write("manifest delta failure\n")
240 254 sys.exit(1)
241 255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
242 256 self.mapcache = (n, map, flags)
243 257 self.listcache = (text, self.addlist)
244 258 self.addlist = None
245 259
246 260 return n
247 261
248 262 class changelog(revlog):
249 263 def __init__(self, opener):
250 264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
251 265
252 266 def extract(self, text):
253 267 if not text:
254 268 return (nullid, "", "0", [], "")
255 269 last = text.index("\n\n")
256 270 desc = text[last + 2:]
257 271 l = text[:last].splitlines()
258 272 manifest = bin(l[0])
259 273 user = l[1]
260 274 date = l[2]
261 275 files = l[3:]
262 276 return (manifest, user, date, files, desc)
263 277
264 278 def read(self, node):
265 279 return self.extract(self.revision(node))
266 280
267 281 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
268 282 user=None, date=None):
269 283 date = date or "%d %d" % (time.time(), time.timezone)
270 284 list.sort()
271 285 l = [hex(manifest), user, date] + list + ["", desc]
272 286 text = "\n".join(l)
273 287 return self.addrevision(text, transaction, self.count(), p1, p2)
274 288
275 289 class dirstate:
276 290 def __init__(self, opener, ui, root):
277 291 self.opener = opener
278 292 self.root = root
279 293 self.dirty = 0
280 294 self.ui = ui
281 295 self.map = None
282 296 self.pl = None
283 297 self.copies = {}
284 298 self.ignorefunc = None
285 299
286 300 def wjoin(self, f):
287 301 return os.path.join(self.root, f)
288 302
289 303 def ignore(self, f):
290 304 if not self.ignorefunc:
291 305 bigpat = []
292 306 try:
293 307 l = file(self.wjoin(".hgignore"))
294 308 for pat in l:
295 309 if pat != "\n":
296 310 p = util.pconvert(pat[:-1])
297 311 try:
298 312 r = re.compile(p)
299 313 except:
300 314 self.ui.warn("ignoring invalid ignore"
301 315 + " regular expression '%s'\n" % p)
302 316 else:
303 317 bigpat.append(util.pconvert(pat[:-1]))
304 318 except IOError: pass
305 319
306 320 if bigpat:
307 321 s = "(?:%s)" % (")|(?:".join(bigpat))
308 322 r = re.compile(s)
309 323 self.ignorefunc = r.search
310 324 else:
311 325 self.ignorefunc = util.never
312 326
313 327 return self.ignorefunc(f)
314 328
315 329 def __del__(self):
316 330 if self.dirty:
317 331 self.write()
318 332
319 333 def __getitem__(self, key):
320 334 try:
321 335 return self.map[key]
322 336 except TypeError:
323 337 self.read()
324 338 return self[key]
325 339
326 340 def __contains__(self, key):
327 341 if not self.map: self.read()
328 342 return key in self.map
329 343
330 344 def parents(self):
331 345 if not self.pl:
332 346 self.read()
333 347 return self.pl
334 348
335 349 def markdirty(self):
336 350 if not self.dirty:
337 351 self.dirty = 1
338 352
339 353 def setparents(self, p1, p2 = nullid):
340 354 self.markdirty()
341 355 self.pl = p1, p2
342 356
343 357 def state(self, key):
344 358 try:
345 359 return self[key][0]
346 360 except KeyError:
347 361 return "?"
348 362
349 363 def read(self):
350 364 if self.map is not None: return self.map
351 365
352 366 self.map = {}
353 367 self.pl = [nullid, nullid]
354 368 try:
355 369 st = self.opener("dirstate").read()
356 370 if not st: return
357 371 except: return
358 372
359 373 self.pl = [st[:20], st[20: 40]]
360 374
361 375 pos = 40
362 376 while pos < len(st):
363 377 e = struct.unpack(">cllll", st[pos:pos+17])
364 378 l = e[4]
365 379 pos += 17
366 380 f = st[pos:pos + l]
367 381 if '\0' in f:
368 382 f, c = f.split('\0')
369 383 self.copies[f] = c
370 384 self.map[f] = e[:4]
371 385 pos += l
372 386
373 387 def copy(self, source, dest):
374 388 self.read()
375 389 self.markdirty()
376 390 self.copies[dest] = source
377 391
378 392 def copied(self, file):
379 393 return self.copies.get(file, None)
380 394
381 395 def update(self, files, state):
382 396 ''' current states:
383 397 n normal
384 398 m needs merging
385 399 r marked for removal
386 400 a marked for addition'''
387 401
388 402 if not files: return
389 403 self.read()
390 404 self.markdirty()
391 405 for f in files:
392 406 if state == "r":
393 407 self.map[f] = ('r', 0, 0, 0)
394 408 else:
395 409 s = os.stat(os.path.join(self.root, f))
396 410 self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)
397 411
398 412 def forget(self, files):
399 413 if not files: return
400 414 self.read()
401 415 self.markdirty()
402 416 for f in files:
403 417 try:
404 418 del self.map[f]
405 419 except KeyError:
406 420 self.ui.warn("not in dirstate: %s!\n" % f)
407 421 pass
408 422
409 423 def clear(self):
410 424 self.map = {}
411 425 self.markdirty()
412 426
413 427 def write(self):
414 428 st = self.opener("dirstate", "w")
415 429 st.write("".join(self.pl))
416 430 for f, e in self.map.items():
417 431 c = self.copied(f)
418 432 if c:
419 433 f = f + "\0" + c
420 434 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
421 435 st.write(e + f)
422 436 self.dirty = 0
423 437
424 438 def walk(self, files = None, match = util.always):
425 439 self.read()
426 440 dc = self.map.copy()
427 441 # walk all files by default
428 442 if not files: files = [self.root]
429 443 def traverse():
430 444 for f in util.unique(files):
431 445 f = os.path.join(self.root, f)
432 446 if os.path.isdir(f):
433 447 for dir, subdirs, fl in os.walk(f):
434 448 d = dir[len(self.root) + 1:]
435 449 if d == '.hg':
436 450 subdirs[:] = []
437 451 continue
438 452 for sd in subdirs:
439 453 ds = os.path.join(d, sd +'/')
440 454 if self.ignore(ds) or not match(ds):
441 455 subdirs.remove(sd)
442 456 for fn in fl:
443 457 fn = util.pconvert(os.path.join(d, fn))
444 458 yield 'f', fn
445 459 else:
446 460 yield 'f', f[len(self.root) + 1:]
447 461
448 462 for k in dc.keys():
449 463 yield 'm', k
450 464
451 465 # yield only files that match: all in dirstate, others only if
452 466 # not in .hgignore
453 467
454 468 for src, fn in util.unique(traverse()):
455 469 if fn in dc:
456 470 del dc[fn]
457 471 elif self.ignore(fn):
458 472 continue
459 473 if match(fn):
460 474 yield src, fn
461 475
462 476 def changes(self, files = None, match = util.always):
463 477 self.read()
464 478 dc = self.map.copy()
465 479 lookup, changed, added, unknown = [], [], [], []
466 480
467 481 for src, fn in self.walk(files, match):
468 482 try: s = os.stat(os.path.join(self.root, fn))
469 483 except: continue
470 484
471 485 if fn in dc:
472 486 c = dc[fn]
473 487 del dc[fn]
474 488
475 489 if c[0] == 'm':
476 490 changed.append(fn)
477 491 elif c[0] == 'a':
478 492 added.append(fn)
479 493 elif c[0] == 'r':
480 494 unknown.append(fn)
481 495 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
482 496 changed.append(fn)
483 497 elif c[1] != s.st_mode or c[3] != s.st_mtime:
484 498 lookup.append(fn)
485 499 else:
486 500 if match(fn): unknown.append(fn)
487 501
488 502 return (lookup, changed, added, filter(match, dc.keys()), unknown)
489 503
490 504 # used to avoid circular references so destructors work
491 505 def opener(base):
492 506 p = base
493 507 def o(path, mode="r"):
494 508 if p.startswith("http://"):
495 509 f = os.path.join(p, urllib.quote(path))
496 510 return httprangereader.httprangereader(f)
497 511
498 512 f = os.path.join(p, path)
499 513
500 514 mode += "b" # for that other OS
501 515
502 516 if mode[0] != "r":
503 517 try:
504 518 s = os.stat(f)
505 519 except OSError:
506 520 d = os.path.dirname(f)
507 521 if not os.path.isdir(d):
508 522 os.makedirs(d)
509 523 else:
510 524 if s.st_nlink > 1:
511 525 file(f + ".tmp", "wb").write(file(f, "rb").read())
512 526 util.rename(f+".tmp", f)
513 527
514 528 return file(f, mode)
515 529
516 530 return o
517 531
518 532 class RepoError(Exception): pass
519 533
520 534 class localrepository:
521 535 def __init__(self, ui, path=None, create=0):
522 536 self.remote = 0
523 537 if path and path.startswith("http://"):
524 538 self.remote = 1
525 539 self.path = path
526 540 else:
527 541 if not path:
528 542 p = os.getcwd()
529 543 while not os.path.isdir(os.path.join(p, ".hg")):
530 544 oldp = p
531 545 p = os.path.dirname(p)
532 546 if p == oldp: raise RepoError("no repo found")
533 547 path = p
534 548 self.path = os.path.join(path, ".hg")
535 549
536 550 if not create and not os.path.isdir(self.path):
537 551 raise RepoError("repository %s not found" % self.path)
538 552
539 553 self.root = path
540 554 self.ui = ui
541 555
542 556 if create:
543 557 os.mkdir(self.path)
544 558 os.mkdir(self.join("data"))
545 559
546 560 self.opener = opener(self.path)
547 561 self.wopener = opener(self.root)
548 562 self.manifest = manifest(self.opener)
549 563 self.changelog = changelog(self.opener)
550 564 self.tagscache = None
551 565 self.nodetagscache = None
552 566
553 567 if not self.remote:
554 568 self.dirstate = dirstate(self.opener, ui, self.root)
555 569 try:
556 570 self.ui.readconfig(self.opener("hgrc"))
557 571 except IOError: pass
558 572
559 573 def hook(self, name, **args):
560 574 s = self.ui.config("hooks", name)
561 575 if s:
562 576 self.ui.note("running hook %s: %s\n" % (name, s))
563 577 old = {}
564 578 for k, v in args.items():
565 579 k = k.upper()
566 580 old[k] = os.environ.get(k, None)
567 581 os.environ[k] = v
568 582
569 583 r = os.system(s)
570 584
571 585 for k, v in old.items():
572 586 if v != None:
573 587 os.environ[k] = v
574 588 else:
575 589 del os.environ[k]
576 590
577 591 if r:
578 592 self.ui.warn("abort: %s hook failed with status %d!\n" %
579 593 (name, r))
580 594 return False
581 595 return True
582 596
583 597 def tags(self):
584 598 '''return a mapping of tag to node'''
585 599 if not self.tagscache:
586 600 self.tagscache = {}
587 601 def addtag(self, k, n):
588 602 try:
589 603 bin_n = bin(n)
590 604 except TypeError:
591 605 bin_n = ''
592 606 self.tagscache[k.strip()] = bin_n
593 607
594 608 try:
595 609 # read each head of the tags file, ending with the tip
596 610 # and add each tag found to the map, with "newer" ones
597 611 # taking precedence
598 612 fl = self.file(".hgtags")
599 613 h = fl.heads()
600 614 h.reverse()
601 615 for r in h:
602 616 for l in fl.revision(r).splitlines():
603 617 if l:
604 618 n, k = l.split(" ", 1)
605 619 addtag(self, k, n)
606 620 except KeyError:
607 621 pass
608 622
609 623 try:
610 624 f = self.opener("localtags")
611 625 for l in f:
612 626 n, k = l.split(" ", 1)
613 627 addtag(self, k, n)
614 628 except IOError:
615 629 pass
616 630
617 631 self.tagscache['tip'] = self.changelog.tip()
618 632
619 633 return self.tagscache
620 634
621 635 def tagslist(self):
622 636 '''return a list of tags ordered by revision'''
623 637 l = []
624 638 for t, n in self.tags().items():
625 639 try:
626 640 r = self.changelog.rev(n)
627 641 except:
628 642 r = -2 # sort to the beginning of the list if unknown
629 643 l.append((r,t,n))
630 644 l.sort()
631 645 return [(t,n) for r,t,n in l]
632 646
633 647 def nodetags(self, node):
634 648 '''return the tags associated with a node'''
635 649 if not self.nodetagscache:
636 650 self.nodetagscache = {}
637 651 for t,n in self.tags().items():
638 652 self.nodetagscache.setdefault(n,[]).append(t)
639 653 return self.nodetagscache.get(node, [])
640 654
641 655 def lookup(self, key):
642 656 try:
643 657 return self.tags()[key]
644 658 except KeyError:
645 659 try:
646 660 return self.changelog.lookup(key)
647 661 except:
648 662 raise RepoError("unknown revision '%s'" % key)
649 663
650 664 def dev(self):
651 665 if self.remote: return -1
652 666 return os.stat(self.path).st_dev
653 667
654 668 def join(self, f):
655 669 return os.path.join(self.path, f)
656 670
657 671 def wjoin(self, f):
658 672 return os.path.join(self.root, f)
659 673
660 674 def file(self, f):
661 675 if f[0] == '/': f = f[1:]
662 676 return filelog(self.opener, f)
663 677
664 678 def getcwd(self):
665 679 cwd = os.getcwd()
666 680 if cwd == self.root: return ''
667 681 return cwd[len(self.root) + 1:]
668 682
669 683 def wfile(self, f, mode='r'):
670 684 return self.wopener(f, mode)
671 685
672 686 def transaction(self):
673 687 # save dirstate for undo
674 688 try:
675 689 ds = self.opener("dirstate").read()
676 690 except IOError:
677 691 ds = ""
678 692 self.opener("journal.dirstate", "w").write(ds)
679 693
680 694 def after():
681 695 util.rename(self.join("journal"), self.join("undo"))
682 696 util.rename(self.join("journal.dirstate"),
683 697 self.join("undo.dirstate"))
684 698
685 699 return transaction.transaction(self.ui.warn, self.opener,
686 700 self.join("journal"), after)
687 701
688 702 def recover(self):
689 703 lock = self.lock()
690 704 if os.path.exists(self.join("journal")):
691 705 self.ui.status("rolling back interrupted transaction\n")
692 706 return transaction.rollback(self.opener, self.join("journal"))
693 707 else:
694 708 self.ui.warn("no interrupted transaction available\n")
695 709
696 710 def undo(self):
697 711 lock = self.lock()
698 712 if os.path.exists(self.join("undo")):
699 713 self.ui.status("rolling back last transaction\n")
700 714 transaction.rollback(self.opener, self.join("undo"))
701 715 self.dirstate = None
702 716 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
703 717 self.dirstate = dirstate(self.opener, self.ui, self.root)
704 718 else:
705 719 self.ui.warn("no undo information available\n")
706 720
707 721 def lock(self, wait = 1):
708 722 try:
709 723 return lock.lock(self.join("lock"), 0)
710 724 except lock.LockHeld, inst:
711 725 if wait:
712 726 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
713 727 return lock.lock(self.join("lock"), wait)
714 728 raise inst
715 729
716 730 def rawcommit(self, files, text, user, date, p1=None, p2=None):
717 731 orig_parent = self.dirstate.parents()[0] or nullid
718 732 p1 = p1 or self.dirstate.parents()[0] or nullid
719 733 p2 = p2 or self.dirstate.parents()[1] or nullid
720 734 c1 = self.changelog.read(p1)
721 735 c2 = self.changelog.read(p2)
722 736 m1 = self.manifest.read(c1[0])
723 737 mf1 = self.manifest.readflags(c1[0])
724 738 m2 = self.manifest.read(c2[0])
725 739
726 740 if orig_parent == p1:
727 741 update_dirstate = 1
728 742 else:
729 743 update_dirstate = 0
730 744
731 745 tr = self.transaction()
732 746 mm = m1.copy()
733 747 mfm = mf1.copy()
734 748 linkrev = self.changelog.count()
735 749 for f in files:
736 750 try:
737 751 t = self.wfile(f).read()
738 752 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
739 753 r = self.file(f)
740 754 mfm[f] = tm
741 755 mm[f] = r.add(t, {}, tr, linkrev,
742 756 m1.get(f, nullid), m2.get(f, nullid))
743 757 if update_dirstate:
744 758 self.dirstate.update([f], "n")
745 759 except IOError:
746 760 try:
747 761 del mm[f]
748 762 del mfm[f]
749 763 if update_dirstate:
750 764 self.dirstate.forget([f])
751 765 except:
752 766 # deleted from p2?
753 767 pass
754 768
755 769 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
756 770 user = user or self.ui.username()
757 771 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
758 772 tr.close()
759 773 if update_dirstate:
760 774 self.dirstate.setparents(n, nullid)
761 775
762 776 def commit(self, files = None, text = "", user = None, date = None):
763 777 commit = []
764 778 remove = []
765 779 if files:
766 780 for f in files:
767 781 s = self.dirstate.state(f)
768 782 if s in 'nmai':
769 783 commit.append(f)
770 784 elif s == 'r':
771 785 remove.append(f)
772 786 else:
773 787 self.ui.warn("%s not tracked!\n" % f)
774 788 else:
775 789 (c, a, d, u) = self.changes()
776 790 commit = c + a
777 791 remove = d
778 792
779 793 if not commit and not remove:
780 794 self.ui.status("nothing changed\n")
781 795 return
782 796
783 797 if not self.hook("precommit"):
784 798 return 1
785 799
786 800 p1, p2 = self.dirstate.parents()
787 801 c1 = self.changelog.read(p1)
788 802 c2 = self.changelog.read(p2)
789 803 m1 = self.manifest.read(c1[0])
790 804 mf1 = self.manifest.readflags(c1[0])
791 805 m2 = self.manifest.read(c2[0])
792 806 lock = self.lock()
793 807 tr = self.transaction()
794 808
795 809 # check in files
796 810 new = {}
797 811 linkrev = self.changelog.count()
798 812 commit.sort()
799 813 for f in commit:
800 814 self.ui.note(f + "\n")
801 815 try:
802 816 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
803 817 t = self.wfile(f).read()
804 818 except IOError:
805 819 self.ui.warn("trouble committing %s!\n" % f)
806 820 raise
807 821
808 822 meta = {}
809 823 cp = self.dirstate.copied(f)
810 824 if cp:
811 825 meta["copy"] = cp
812 826 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
813 827 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
814 828
815 829 r = self.file(f)
816 830 fp1 = m1.get(f, nullid)
817 831 fp2 = m2.get(f, nullid)
818 832 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
819 833
820 834 # update manifest
821 835 m1.update(new)
822 836 for f in remove:
823 837 if f in m1:
824 838 del m1[f]
825 839 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
826 840 (new, remove))
827 841
828 842 # add changeset
829 843 new = new.keys()
830 844 new.sort()
831 845
832 846 if not text:
833 847 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
834 848 edittext += "".join(["HG: changed %s\n" % f for f in new])
835 849 edittext += "".join(["HG: removed %s\n" % f for f in remove])
836 850 edittext = self.ui.edit(edittext)
837 851 if not edittext.rstrip():
838 852 return 1
839 853 text = edittext
840 854
841 855 user = user or self.ui.username()
842 856 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
843 857
844 858 tr.close()
845 859
846 860 self.dirstate.setparents(n)
847 861 self.dirstate.update(new, "n")
848 862 self.dirstate.forget(remove)
849 863
850 864 if not self.hook("commit", node=hex(n)):
851 865 return 1
852 866
853 867 def walk(self, node = None, files = [], match = util.always):
854 868 if node:
855 869 for fn in self.manifest.read(self.changelog.read(node)[0]):
856 870 yield 'm', fn
857 871 else:
858 872 for src, fn in self.dirstate.walk(files, match):
859 873 yield src, fn
860 874
861 875 def changes(self, node1 = None, node2 = None, files = [],
862 876 match = util.always):
863 877 mf2, u = None, []
864 878
865 879 def fcmp(fn, mf):
866 880 t1 = self.wfile(fn).read()
867 881 t2 = self.file(fn).revision(mf[fn])
868 882 return cmp(t1, t2)
869 883
870 884 def mfmatches(node):
871 885 mf = dict(self.manifest.read(node))
872 886 for fn in mf.keys():
873 887 if not match(fn):
874 888 del mf[fn]
875 889 return mf
876 890
877 891 # are we comparing the working directory?
878 892 if not node2:
879 893 l, c, a, d, u = self.dirstate.changes(files, match)
880 894
881 895 # are we comparing working dir against its parent?
882 896 if not node1:
883 897 if l:
884 898 # do a full compare of any files that might have changed
885 899 change = self.changelog.read(self.dirstate.parents()[0])
886 900 mf2 = mfmatches(change[0])
887 901 for f in l:
888 902 if fcmp(f, mf2):
889 903 c.append(f)
890 904
891 905 for l in c, a, d, u:
892 906 l.sort()
893 907
894 908 return (c, a, d, u)
895 909
896 910 # are we comparing working dir against non-tip?
897 911 # generate a pseudo-manifest for the working dir
898 912 if not node2:
899 913 if not mf2:
900 914 change = self.changelog.read(self.dirstate.parents()[0])
901 915 mf2 = mfmatches(change[0])
902 916 for f in a + c + l:
903 917 mf2[f] = ""
904 918 for f in d:
905 919 if f in mf2: del mf2[f]
906 920 else:
907 921 change = self.changelog.read(node2)
908 922 mf2 = mfmatches(change[0])
909 923
910 924 # flush lists from dirstate before comparing manifests
911 925 c, a = [], []
912 926
913 927 change = self.changelog.read(node1)
914 928 mf1 = mfmatches(change[0])
915 929
916 930 for fn in mf2:
917 931 if mf1.has_key(fn):
918 932 if mf1[fn] != mf2[fn]:
919 933 if mf2[fn] != "" or fcmp(fn, mf1):
920 934 c.append(fn)
921 935 del mf1[fn]
922 936 else:
923 937 a.append(fn)
924 938
925 939 d = mf1.keys()
926 940
927 941 for l in c, a, d, u:
928 942 l.sort()
929 943
930 944 return (c, a, d, u)
931 945
932 946 def add(self, list):
933 947 for f in list:
934 948 p = self.wjoin(f)
935 949 if not os.path.exists(p):
936 950 self.ui.warn("%s does not exist!\n" % f)
937 951 elif not os.path.isfile(p):
938 952 self.ui.warn("%s not added: only files supported currently\n" % f)
939 953 elif self.dirstate.state(f) in 'an':
940 954 self.ui.warn("%s already tracked!\n" % f)
941 955 else:
942 956 self.dirstate.update([f], "a")
943 957
944 958 def forget(self, list):
945 959 for f in list:
946 960 if self.dirstate.state(f) not in 'ai':
947 961 self.ui.warn("%s not added!\n" % f)
948 962 else:
949 963 self.dirstate.forget([f])
950 964
951 965 def remove(self, list):
952 966 for f in list:
953 967 p = self.wjoin(f)
954 968 if os.path.exists(p):
955 969 self.ui.warn("%s still exists!\n" % f)
956 970 elif self.dirstate.state(f) == 'a':
957 971 self.ui.warn("%s never committed!\n" % f)
958 972 self.dirstate.forget([f])
959 973 elif f not in self.dirstate:
960 974 self.ui.warn("%s not tracked!\n" % f)
961 975 else:
962 976 self.dirstate.update([f], "r")
963 977
964 978 def copy(self, source, dest):
965 979 p = self.wjoin(dest)
966 980 if not os.path.exists(p):
967 981 self.ui.warn("%s does not exist!\n" % dest)
968 982 elif not os.path.isfile(p):
969 983 self.ui.warn("copy failed: %s is not a file\n" % dest)
970 984 else:
971 985 if self.dirstate.state(dest) == '?':
972 986 self.dirstate.update([dest], "a")
973 987 self.dirstate.copy(source, dest)
974 988
975 989 def heads(self):
976 990 return self.changelog.heads()
977 991
978 992 def branches(self, nodes):
979 993 if not nodes: nodes = [self.changelog.tip()]
980 994 b = []
981 995 for n in nodes:
982 996 t = n
983 997 while n:
984 998 p = self.changelog.parents(n)
985 999 if p[1] != nullid or p[0] == nullid:
986 1000 b.append((t, n, p[0], p[1]))
987 1001 break
988 1002 n = p[0]
989 1003 return b
990 1004
991 1005 def between(self, pairs):
992 1006 r = []
993 1007
994 1008 for top, bottom in pairs:
995 1009 n, l, i = top, [], 0
996 1010 f = 1
997 1011
998 1012 while n != bottom:
999 1013 p = self.changelog.parents(n)[0]
1000 1014 if i == f:
1001 1015 l.append(n)
1002 1016 f = f * 2
1003 1017 n = p
1004 1018 i += 1
1005 1019
1006 1020 r.append(l)
1007 1021
1008 1022 return r
1009 1023
1010 1024 def newer(self, nodes):
1011 1025 m = {}
1012 1026 nl = []
1013 1027 pm = {}
1014 1028 cl = self.changelog
1015 1029 t = l = cl.count()
1016 1030
1017 1031 # find the lowest numbered node
1018 1032 for n in nodes:
1019 1033 l = min(l, cl.rev(n))
1020 1034 m[n] = 1
1021 1035
1022 1036 for i in xrange(l, t):
1023 1037 n = cl.node(i)
1024 1038 if n in m: # explicitly listed
1025 1039 pm[n] = 1
1026 1040 nl.append(n)
1027 1041 continue
1028 1042 for p in cl.parents(n):
1029 1043 if p in pm: # parent listed
1030 1044 pm[n] = 1
1031 1045 nl.append(n)
1032 1046 break
1033 1047
1034 1048 return nl
1035 1049
1036 1050 def findincoming(self, remote, base={}):
1037 1051 m = self.changelog.nodemap
1038 1052 search = []
1039 1053 fetch = []
1040 1054 seen = {}
1041 1055 seenbranch = {}
1042 1056
1043 1057 # assume we're closer to the tip than the root
1044 1058 # and start by examining the heads
1045 1059 self.ui.status("searching for changes\n")
1046 1060 heads = remote.heads()
1047 1061 unknown = []
1048 1062 for h in heads:
1049 1063 if h not in m:
1050 1064 unknown.append(h)
1051 1065 else:
1052 1066 base[h] = 1
1053 1067
1054 1068 if not unknown:
1055 1069 return None
1056 1070
1057 1071 rep = {}
1058 1072 reqcnt = 0
1059 1073
1060 1074 # search through remote branches
1061 1075 # a 'branch' here is a linear segment of history, with four parts:
1062 1076 # head, root, first parent, second parent
1063 1077 # (a branch always has two parents (or none) by definition)
1064 1078 unknown = remote.branches(unknown)
1065 1079 while unknown:
1066 1080 r = []
1067 1081 while unknown:
1068 1082 n = unknown.pop(0)
1069 1083 if n[0] in seen:
1070 1084 continue
1071 1085
1072 1086 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1073 1087 if n[0] == nullid:
1074 1088 break
1075 1089 if n in seenbranch:
1076 1090 self.ui.debug("branch already found\n")
1077 1091 continue
1078 1092 if n[1] and n[1] in m: # do we know the base?
1079 1093 self.ui.debug("found incomplete branch %s:%s\n"
1080 1094 % (short(n[0]), short(n[1])))
1081 1095 search.append(n) # schedule branch range for scanning
1082 1096 seenbranch[n] = 1
1083 1097 else:
1084 1098 if n[1] not in seen and n[1] not in fetch:
1085 1099 if n[2] in m and n[3] in m:
1086 1100 self.ui.debug("found new changeset %s\n" %
1087 1101 short(n[1]))
1088 1102 fetch.append(n[1]) # earliest unknown
1089 1103 base[n[2]] = 1 # latest known
1090 1104 continue
1091 1105
1092 1106 for a in n[2:4]:
1093 1107 if a not in rep:
1094 1108 r.append(a)
1095 1109 rep[a] = 1
1096 1110
1097 1111 seen[n[0]] = 1
1098 1112
1099 1113 if r:
1100 1114 reqcnt += 1
1101 1115 self.ui.debug("request %d: %s\n" %
1102 1116 (reqcnt, " ".join(map(short, r))))
1103 1117 for p in range(0, len(r), 10):
1104 1118 for b in remote.branches(r[p:p+10]):
1105 1119 self.ui.debug("received %s:%s\n" %
1106 1120 (short(b[0]), short(b[1])))
1107 1121 if b[0] not in m and b[0] not in seen:
1108 1122 unknown.append(b)
1109 1123
1110 1124 # do binary search on the branches we found
1111 1125 while search:
1112 1126 n = search.pop(0)
1113 1127 reqcnt += 1
1114 1128 l = remote.between([(n[0], n[1])])[0]
1115 1129 l.append(n[1])
1116 1130 p = n[0]
1117 1131 f = 1
1118 1132 for i in l:
1119 1133 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1120 1134 if i in m:
1121 1135 if f <= 2:
1122 1136 self.ui.debug("found new branch changeset %s\n" %
1123 1137 short(p))
1124 1138 fetch.append(p)
1125 1139 base[i] = 1
1126 1140 else:
1127 1141 self.ui.debug("narrowed branch search to %s:%s\n"
1128 1142 % (short(p), short(i)))
1129 1143 search.append((p, i))
1130 1144 break
1131 1145 p, f = i, f * 2
1132 1146
1133 1147 # sanity check our fetch list
1134 1148 for f in fetch:
1135 1149 if f in m:
1136 1150 raise RepoError("already have changeset " + short(f[:4]))
1137 1151
1138 1152 if base.keys() == [nullid]:
1139 1153 self.ui.warn("warning: pulling from an unrelated repository!\n")
1140 1154
1141 1155 self.ui.note("adding new changesets starting at " +
1142 1156 " ".join([short(f) for f in fetch]) + "\n")
1143 1157
1144 1158 self.ui.debug("%d total queries\n" % reqcnt)
1145 1159
1146 1160 return fetch
1147 1161
1148 1162 def findoutgoing(self, remote):
1149 1163 base = {}
1150 1164 self.findincoming(remote, base)
1151 1165 remain = dict.fromkeys(self.changelog.nodemap)
1152 1166
1153 1167 # prune everything remote has from the tree
1154 1168 del remain[nullid]
1155 1169 remove = base.keys()
1156 1170 while remove:
1157 1171 n = remove.pop(0)
1158 1172 if n in remain:
1159 1173 del remain[n]
1160 1174 for p in self.changelog.parents(n):
1161 1175 remove.append(p)
1162 1176
1163 1177 # find every node whose parents have been pruned
1164 1178 subset = []
1165 1179 for n in remain:
1166 1180 p1, p2 = self.changelog.parents(n)
1167 1181 if p1 not in remain and p2 not in remain:
1168 1182 subset.append(n)
1169 1183
1170 1184 # this is the set of all roots we have to push
1171 1185 return subset
1172 1186
1173 1187 def pull(self, remote):
1174 1188 lock = self.lock()
1175 1189
1176 1190 # if we have an empty repo, fetch everything
1177 1191 if self.changelog.tip() == nullid:
1178 1192 self.ui.status("requesting all changes\n")
1179 1193 fetch = [nullid]
1180 1194 else:
1181 1195 fetch = self.findincoming(remote)
1182 1196
1183 1197 if not fetch:
1184 1198 self.ui.status("no changes found\n")
1185 1199 return 1
1186 1200
1187 1201 cg = remote.changegroup(fetch)
1188 1202 return self.addchangegroup(cg)
1189 1203
1190 1204 def push(self, remote):
1191 1205 lock = remote.lock()
1192 1206 update = self.findoutgoing(remote)
1193 1207 if not update:
1194 1208 self.ui.status("no changes found\n")
1195 1209 return 1
1196 1210
1197 1211 cg = self.changegroup(update)
1198 1212 return remote.addchangegroup(cg)
1199 1213
1200 1214 def changegroup(self, basenodes):
1201 1215 class genread:
1202 1216 def __init__(self, generator):
1203 1217 self.g = generator
1204 1218 self.buf = ""
1205 1219 def read(self, l):
1206 1220 while l > len(self.buf):
1207 1221 try:
1208 1222 self.buf += self.g.next()
1209 1223 except StopIteration:
1210 1224 break
1211 1225 d, self.buf = self.buf[:l], self.buf[l:]
1212 1226 return d
1213 1227
1214 1228 def gengroup():
1215 1229 nodes = self.newer(basenodes)
1216 1230
1217 1231 # construct the link map
1218 1232 linkmap = {}
1219 1233 for n in nodes:
1220 1234 linkmap[self.changelog.rev(n)] = n
1221 1235
1222 1236 # construct a list of all changed files
1223 1237 changed = {}
1224 1238 for n in nodes:
1225 1239 c = self.changelog.read(n)
1226 1240 for f in c[3]:
1227 1241 changed[f] = 1
1228 1242 changed = changed.keys()
1229 1243 changed.sort()
1230 1244
1231 1245 # the changegroup is changesets + manifests + all file revs
1232 1246 revs = [ self.changelog.rev(n) for n in nodes ]
1233 1247
1234 1248 for y in self.changelog.group(linkmap): yield y
1235 1249 for y in self.manifest.group(linkmap): yield y
1236 1250 for f in changed:
1237 1251 yield struct.pack(">l", len(f) + 4) + f
1238 1252 g = self.file(f).group(linkmap)
1239 1253 for y in g:
1240 1254 yield y
1241 1255
1242 1256 yield struct.pack(">l", 0)
1243 1257
1244 1258 return genread(gengroup())
1245 1259
1246 1260 def addchangegroup(self, source):
1247 1261
1248 1262 def getchunk():
1249 1263 d = source.read(4)
1250 1264 if not d: return ""
1251 1265 l = struct.unpack(">l", d)[0]
1252 1266 if l <= 4: return ""
1253 1267 return source.read(l - 4)
1254 1268
1255 1269 def getgroup():
1256 1270 while 1:
1257 1271 c = getchunk()
1258 1272 if not c: break
1259 1273 yield c
1260 1274
1261 1275 def csmap(x):
1262 1276 self.ui.debug("add changeset %s\n" % short(x))
1263 1277 return self.changelog.count()
1264 1278
1265 1279 def revmap(x):
1266 1280 return self.changelog.rev(x)
1267 1281
1268 1282 if not source: return
1269 1283 changesets = files = revisions = 0
1270 1284
1271 1285 tr = self.transaction()
1272 1286
1273 1287 # pull off the changeset group
1274 1288 self.ui.status("adding changesets\n")
1275 1289 co = self.changelog.tip()
1276 1290 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1277 1291 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1278 1292
1279 1293 # pull off the manifest group
1280 1294 self.ui.status("adding manifests\n")
1281 1295 mm = self.manifest.tip()
1282 1296 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1283 1297
1284 1298 # process the files
1285 1299 self.ui.status("adding file changes\n")
1286 1300 while 1:
1287 1301 f = getchunk()
1288 1302 if not f: break
1289 1303 self.ui.debug("adding %s revisions\n" % f)
1290 1304 fl = self.file(f)
1291 1305 o = fl.count()
1292 1306 n = fl.addgroup(getgroup(), revmap, tr)
1293 1307 revisions += fl.count() - o
1294 1308 files += 1
1295 1309
1296 1310 self.ui.status(("added %d changesets" +
1297 1311 " with %d changes to %d files\n")
1298 1312 % (changesets, revisions, files))
1299 1313
1300 1314 tr.close()
1301 1315
1302 1316 if not self.hook("changegroup"):
1303 1317 return 1
1304 1318
1305 1319 return
1306 1320
1307 1321 def update(self, node, allow=False, force=False, choose=None,
1308 1322 moddirstate=True):
1309 1323 pl = self.dirstate.parents()
1310 1324 if not force and pl[1] != nullid:
1311 1325 self.ui.warn("aborting: outstanding uncommitted merges\n")
1312 1326 return 1
1313 1327
1314 1328 p1, p2 = pl[0], node
1315 1329 pa = self.changelog.ancestor(p1, p2)
1316 1330 m1n = self.changelog.read(p1)[0]
1317 1331 m2n = self.changelog.read(p2)[0]
1318 1332 man = self.manifest.ancestor(m1n, m2n)
1319 1333 m1 = self.manifest.read(m1n)
1320 1334 mf1 = self.manifest.readflags(m1n)
1321 1335 m2 = self.manifest.read(m2n)
1322 1336 mf2 = self.manifest.readflags(m2n)
1323 1337 ma = self.manifest.read(man)
1324 1338 mfa = self.manifest.readflags(man)
1325 1339
1326 1340 (c, a, d, u) = self.changes()
1327 1341
1328 1342 # is this a jump, or a merge? i.e. is there a linear path
1329 1343 # from p1 to p2?
1330 1344 linear_path = (pa == p1 or pa == p2)
1331 1345
1332 1346 # resolve the manifest to determine which files
1333 1347 # we care about merging
1334 1348 self.ui.note("resolving manifests\n")
1335 1349 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1336 1350 (force, allow, moddirstate, linear_path))
1337 1351 self.ui.debug(" ancestor %s local %s remote %s\n" %
1338 1352 (short(man), short(m1n), short(m2n)))
1339 1353
1340 1354 merge = {}
1341 1355 get = {}
1342 1356 remove = []
1343 1357 mark = {}
1344 1358
1345 1359 # construct a working dir manifest
1346 1360 mw = m1.copy()
1347 1361 mfw = mf1.copy()
1348 1362 umap = dict.fromkeys(u)
1349 1363
1350 1364 for f in a + c + u:
1351 1365 mw[f] = ""
1352 1366 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1353 1367
1354 1368 for f in d:
1355 1369 if f in mw: del mw[f]
1356 1370
1357 1371 # If we're jumping between revisions (as opposed to merging),
1358 1372 # and if neither the working directory nor the target rev has
1359 1373 # the file, then we need to remove it from the dirstate, to
1360 1374 # prevent the dirstate from listing the file when it is no
1361 1375 # longer in the manifest.
1362 1376 if moddirstate and linear_path and f not in m2:
1363 1377 self.dirstate.forget((f,))
1364 1378
1365 1379 # Compare manifests
1366 1380 for f, n in mw.iteritems():
1367 1381 if choose and not choose(f): continue
1368 1382 if f in m2:
1369 1383 s = 0
1370 1384
1371 1385 # is the wfile new since m1, and match m2?
1372 1386 if f not in m1:
1373 1387 t1 = self.wfile(f).read()
1374 1388 t2 = self.file(f).revision(m2[f])
1375 1389 if cmp(t1, t2) == 0:
1376 1390 mark[f] = 1
1377 1391 n = m2[f]
1378 1392 del t1, t2
1379 1393
1380 1394 # are files different?
1381 1395 if n != m2[f]:
1382 1396 a = ma.get(f, nullid)
1383 1397 # are both different from the ancestor?
1384 1398 if n != a and m2[f] != a:
1385 1399 self.ui.debug(" %s versions differ, resolve\n" % f)
1386 1400 # merge executable bits
1387 1401 # "if we changed or they changed, change in merge"
1388 1402 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1389 1403 mode = ((a^b) | (a^c)) ^ a
1390 1404 merge[f] = (m1.get(f, nullid), m2[f], mode)
1391 1405 s = 1
1392 1406 # are we clobbering?
1393 1407 # is remote's version newer?
1394 1408 # or are we going back in time?
1395 1409 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1396 1410 self.ui.debug(" remote %s is newer, get\n" % f)
1397 1411 get[f] = m2[f]
1398 1412 s = 1
1399 1413 else:
1400 1414 mark[f] = 1
1401 1415 elif f in umap:
1402 1416 # this unknown file is the same as the checkout
1403 1417 get[f] = m2[f]
1404 1418
1405 1419 if not s and mfw[f] != mf2[f]:
1406 1420 if force:
1407 1421 self.ui.debug(" updating permissions for %s\n" % f)
1408 1422 util.set_exec(self.wjoin(f), mf2[f])
1409 1423 else:
1410 1424 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1411 1425 mode = ((a^b) | (a^c)) ^ a
1412 1426 if mode != b:
1413 1427 self.ui.debug(" updating permissions for %s\n" % f)
1414 1428 util.set_exec(self.wjoin(f), mode)
1415 1429 mark[f] = 1
1416 1430 del m2[f]
1417 1431 elif f in ma:
1418 1432 if n != ma[f]:
1419 1433 r = "d"
1420 1434 if not force and (linear_path or allow):
1421 1435 r = self.ui.prompt(
1422 1436 (" local changed %s which remote deleted\n" % f) +
1423 1437 "(k)eep or (d)elete?", "[kd]", "k")
1424 1438 if r == "d":
1425 1439 remove.append(f)
1426 1440 else:
1427 1441 self.ui.debug("other deleted %s\n" % f)
1428 1442 remove.append(f) # other deleted it
1429 1443 else:
1430 1444 if n == m1.get(f, nullid): # same as parent
1431 1445 if p2 == pa: # going backwards?
1432 1446 self.ui.debug("remote deleted %s\n" % f)
1433 1447 remove.append(f)
1434 1448 else:
1435 1449 self.ui.debug("local created %s, keeping\n" % f)
1436 1450 else:
1437 1451 self.ui.debug("working dir created %s, keeping\n" % f)
1438 1452
1439 1453 for f, n in m2.iteritems():
1440 1454 if choose and not choose(f): continue
1441 1455 if f[0] == "/": continue
1442 1456 if f in ma and n != ma[f]:
1443 1457 r = "k"
1444 1458 if not force and (linear_path or allow):
1445 1459 r = self.ui.prompt(
1446 1460 ("remote changed %s which local deleted\n" % f) +
1447 1461 "(k)eep or (d)elete?", "[kd]", "k")
1448 1462 if r == "k": get[f] = n
1449 1463 elif f not in ma:
1450 1464 self.ui.debug("remote created %s\n" % f)
1451 1465 get[f] = n
1452 1466 else:
1453 1467 if force or p2 == pa: # going backwards?
1454 1468 self.ui.debug("local deleted %s, recreating\n" % f)
1455 1469 get[f] = n
1456 1470 else:
1457 1471 self.ui.debug("local deleted %s\n" % f)
1458 1472
1459 1473 del mw, m1, m2, ma
1460 1474
1461 1475 if force:
1462 1476 for f in merge:
1463 1477 get[f] = merge[f][1]
1464 1478 merge = {}
1465 1479
1466 1480 if linear_path or force:
1467 1481 # we don't need to do any magic, just jump to the new rev
1468 1482 mode = 'n'
1469 1483 p1, p2 = p2, nullid
1470 1484 else:
1471 1485 if not allow:
1472 1486 self.ui.status("this update spans a branch" +
1473 1487 " affecting the following files:\n")
1474 1488 fl = merge.keys() + get.keys()
1475 1489 fl.sort()
1476 1490 for f in fl:
1477 1491 cf = ""
1478 1492 if f in merge: cf = " (resolve)"
1479 1493 self.ui.status(" %s%s\n" % (f, cf))
1480 1494 self.ui.warn("aborting update spanning branches!\n")
1481 1495 self.ui.status("(use update -m to perform a branch merge)\n")
1482 1496 return 1
1483 1497 # we have to remember what files we needed to get/change
1484 1498 # because any file that's different from either one of its
1485 1499 # parents must be in the changeset
1486 1500 mode = 'm'
1487 1501 if moddirstate:
1488 1502 self.dirstate.update(mark.keys(), "m")
1489 1503
1490 1504 if moddirstate:
1491 1505 self.dirstate.setparents(p1, p2)
1492 1506
1493 1507 # get the files we don't need to change
1494 1508 files = get.keys()
1495 1509 files.sort()
1496 1510 for f in files:
1497 1511 if f[0] == "/": continue
1498 1512 self.ui.note("getting %s\n" % f)
1499 1513 t = self.file(f).read(get[f])
1500 1514 try:
1501 1515 self.wfile(f, "w").write(t)
1502 1516 except IOError:
1503 1517 os.makedirs(os.path.dirname(self.wjoin(f)))
1504 1518 self.wfile(f, "w").write(t)
1505 1519 util.set_exec(self.wjoin(f), mf2[f])
1506 1520 if moddirstate:
1507 1521 self.dirstate.update([f], mode)
1508 1522
1509 1523 # merge the tricky bits
1510 1524 files = merge.keys()
1511 1525 files.sort()
1512 1526 for f in files:
1513 1527 self.ui.status("merging %s\n" % f)
1514 1528 m, o, flag = merge[f]
1515 1529 self.merge3(f, m, o)
1516 1530 util.set_exec(self.wjoin(f), flag)
1517 1531 if moddirstate and mode == 'm':
1518 1532 # only update dirstate on branch merge, otherwise we
1519 1533 # could mark files with changes as unchanged
1520 1534 self.dirstate.update([f], mode)
1521 1535
1522 1536 remove.sort()
1523 1537 for f in remove:
1524 1538 self.ui.note("removing %s\n" % f)
1525 1539 try:
1526 1540 os.unlink(f)
1527 1541 except OSError, inst:
1528 1542 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1529 1543 # try removing directories that might now be empty
1530 1544 try: os.removedirs(os.path.dirname(f))
1531 1545 except: pass
1532 1546 if moddirstate:
1533 1547 if mode == 'n':
1534 1548 self.dirstate.forget(remove)
1535 1549 else:
1536 1550 self.dirstate.update(remove, 'r')
1537 1551
1538 1552 def merge3(self, fn, my, other):
1539 1553 """perform a 3-way merge in the working directory"""
1540 1554
1541 1555 def temp(prefix, node):
1542 1556 pre = "%s~%s." % (os.path.basename(fn), prefix)
1543 1557 (fd, name) = tempfile.mkstemp("", pre)
1544 1558 f = os.fdopen(fd, "wb")
1545 1559 f.write(fl.revision(node))
1546 1560 f.close()
1547 1561 return name
1548 1562
1549 1563 fl = self.file(fn)
1550 1564 base = fl.ancestor(my, other)
1551 1565 a = self.wjoin(fn)
1552 1566 b = temp("base", base)
1553 1567 c = temp("other", other)
1554 1568
1555 1569 self.ui.note("resolving %s\n" % fn)
1556 1570 self.ui.debug("file %s: other %s ancestor %s\n" %
1557 1571 (fn, short(other), short(base)))
1558 1572
1559 1573 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1560 1574 or "hgmerge")
1561 1575 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1562 1576 if r:
1563 1577 self.ui.warn("merging %s failed!\n" % fn)
1564 1578
1565 1579 os.unlink(b)
1566 1580 os.unlink(c)
1567 1581
1568 1582 def verify(self):
1569 1583 filelinkrevs = {}
1570 1584 filenodes = {}
1571 1585 changesets = revisions = files = 0
1572 1586 errors = 0
1573 1587
1574 1588 seen = {}
1575 1589 self.ui.status("checking changesets\n")
1576 1590 for i in range(self.changelog.count()):
1577 1591 changesets += 1
1578 1592 n = self.changelog.node(i)
1579 1593 if n in seen:
1580 1594 self.ui.warn("duplicate changeset at revision %d\n" % i)
1581 1595 errors += 1
1582 1596 seen[n] = 1
1583 1597
1584 1598 for p in self.changelog.parents(n):
1585 1599 if p not in self.changelog.nodemap:
1586 1600 self.ui.warn("changeset %s has unknown parent %s\n" %
1587 1601 (short(n), short(p)))
1588 1602 errors += 1
1589 1603 try:
1590 1604 changes = self.changelog.read(n)
1591 1605 except Exception, inst:
1592 1606 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1593 1607 errors += 1
1594 1608
1595 1609 for f in changes[3]:
1596 1610 filelinkrevs.setdefault(f, []).append(i)
1597 1611
1598 1612 seen = {}
1599 1613 self.ui.status("checking manifests\n")
1600 1614 for i in range(self.manifest.count()):
1601 1615 n = self.manifest.node(i)
1602 1616 if n in seen:
1603 1617 self.ui.warn("duplicate manifest at revision %d\n" % i)
1604 1618 errors += 1
1605 1619 seen[n] = 1
1606 1620
1607 1621 for p in self.manifest.parents(n):
1608 1622 if p not in self.manifest.nodemap:
1609 1623 self.ui.warn("manifest %s has unknown parent %s\n" %
1610 1624 (short(n), short(p)))
1611 1625 errors += 1
1612 1626
1613 1627 try:
1614 1628 delta = mdiff.patchtext(self.manifest.delta(n))
1615 1629 except KeyboardInterrupt:
1616 1630 self.ui.warn("aborted")
1617 1631 sys.exit(0)
1618 1632 except Exception, inst:
1619 1633 self.ui.warn("unpacking manifest %s: %s\n"
1620 1634 % (short(n), inst))
1621 1635 errors += 1
1622 1636
1623 1637 ff = [ l.split('\0') for l in delta.splitlines() ]
1624 1638 for f, fn in ff:
1625 1639 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1626 1640
1627 1641 self.ui.status("crosschecking files in changesets and manifests\n")
1628 1642 for f in filenodes:
1629 1643 if f not in filelinkrevs:
1630 1644 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1631 1645 errors += 1
1632 1646
1633 1647 for f in filelinkrevs:
1634 1648 if f not in filenodes:
1635 1649 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1636 1650 errors += 1
1637 1651
1638 1652 self.ui.status("checking files\n")
1639 1653 ff = filenodes.keys()
1640 1654 ff.sort()
1641 1655 for f in ff:
1642 1656 if f == "/dev/null": continue
1643 1657 files += 1
1644 1658 fl = self.file(f)
1645 1659 nodes = { nullid: 1 }
1646 1660 seen = {}
1647 1661 for i in range(fl.count()):
1648 1662 revisions += 1
1649 1663 n = fl.node(i)
1650 1664
1651 1665 if n in seen:
1652 1666 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1653 1667 errors += 1
1654 1668
1655 1669 if n not in filenodes[f]:
1656 1670 self.ui.warn("%s: %d:%s not in manifests\n"
1657 1671 % (f, i, short(n)))
1658 1672 errors += 1
1659 1673 else:
1660 1674 del filenodes[f][n]
1661 1675
1662 1676 flr = fl.linkrev(n)
1663 1677 if flr not in filelinkrevs[f]:
1664 1678 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1665 1679 % (f, short(n), fl.linkrev(n)))
1666 1680 errors += 1
1667 1681 else:
1668 1682 filelinkrevs[f].remove(flr)
1669 1683
1670 1684 # verify contents
1671 1685 try:
1672 1686 t = fl.read(n)
1673 1687 except Exception, inst:
1674 1688 self.ui.warn("unpacking file %s %s: %s\n"
1675 1689 % (f, short(n), inst))
1676 1690 errors += 1
1677 1691
1678 1692 # verify parents
1679 1693 (p1, p2) = fl.parents(n)
1680 1694 if p1 not in nodes:
1681 1695 self.ui.warn("file %s:%s unknown parent 1 %s" %
1682 1696 (f, short(n), short(p1)))
1683 1697 errors += 1
1684 1698 if p2 not in nodes:
1685 1699 self.ui.warn("file %s:%s unknown parent 2 %s" %
 1686 1700 (f, short(n), short(p2)))
1687 1701 errors += 1
1688 1702 nodes[n] = 1
1689 1703
1690 1704 # cross-check
1691 1705 for node in filenodes[f]:
1692 1706 self.ui.warn("node %s in manifests not in %s\n"
1693 1707 % (hex(node), f))
1694 1708 errors += 1
1695 1709
1696 1710 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1697 1711 (files, changesets, revisions))
1698 1712
1699 1713 if errors:
1700 1714 self.ui.warn("%d integrity errors encountered!\n" % errors)
1701 1715 return 1
1702 1716
1703 1717 class httprepository:
1704 1718 def __init__(self, ui, path):
1705 1719 # fix missing / after hostname
1706 1720 s = urlparse.urlsplit(path)
1707 1721 partial = s[2]
1708 1722 if not partial: partial = "/"
1709 1723 self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
1710 1724 self.ui = ui
1711 1725 no_list = [ "localhost", "127.0.0.1" ]
1712 1726 host = ui.config("http_proxy", "host")
1713 1727 if host is None:
1714 1728 host = os.environ.get("http_proxy")
1715 1729 if host and host.startswith('http://'):
1716 1730 host = host[7:]
1717 1731 user = ui.config("http_proxy", "user")
1718 1732 passwd = ui.config("http_proxy", "passwd")
1719 1733 no = ui.config("http_proxy", "no")
1720 1734 if no is None:
1721 1735 no = os.environ.get("no_proxy")
1722 1736 if no:
1723 1737 no_list = no_list + no.split(",")
1724 1738
1725 1739 no_proxy = 0
1726 1740 for h in no_list:
1727 1741 if (path.startswith("http://" + h + "/") or
1728 1742 path.startswith("http://" + h + ":") or
1729 1743 path == "http://" + h):
1730 1744 no_proxy = 1
1731 1745
1732 1746 # Note: urllib2 takes proxy values from the environment and those will
1733 1747 # take precedence
1734 1748 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1735 1749 if os.environ.has_key(env):
1736 1750 del os.environ[env]
1737 1751
1738 1752 proxy_handler = urllib2.BaseHandler()
1739 1753 if host and not no_proxy:
1740 1754 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1741 1755
1742 1756 authinfo = None
1743 1757 if user and passwd:
1744 1758 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1745 1759 passmgr.add_password(None, host, user, passwd)
1746 1760 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1747 1761
1748 1762 opener = urllib2.build_opener(proxy_handler, authinfo)
1749 1763 urllib2.install_opener(opener)
1750 1764
1751 1765 def dev(self):
1752 1766 return -1
1753 1767
1754 1768 def do_cmd(self, cmd, **args):
1755 1769 self.ui.debug("sending %s command\n" % cmd)
1756 1770 q = {"cmd": cmd}
1757 1771 q.update(args)
1758 1772 qs = urllib.urlencode(q)
1759 1773 cu = "%s?%s" % (self.url, qs)
1760 1774 resp = urllib2.urlopen(cu)
1761 1775 proto = resp.headers['content-type']
1762 1776
1763 1777 # accept old "text/plain" and "application/hg-changegroup" for now
1764 1778 if not proto.startswith('application/mercurial') and \
1765 1779 not proto.startswith('text/plain') and \
1766 1780 not proto.startswith('application/hg-changegroup'):
1767 1781 raise RepoError("'%s' does not appear to be an hg repository"
1768 1782 % self.url)
1769 1783
1770 1784 if proto.startswith('application/mercurial'):
1771 1785 version = proto[22:]
1772 1786 if float(version) > 0.1:
1773 1787 raise RepoError("'%s' uses newer protocol %s" %
1774 1788 (self.url, version))
1775 1789
1776 1790 return resp
1777 1791
1778 1792 def heads(self):
1779 1793 d = self.do_cmd("heads").read()
1780 1794 try:
1781 1795 return map(bin, d[:-1].split(" "))
1782 1796 except:
1783 1797 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1784 1798 raise
1785 1799
1786 1800 def branches(self, nodes):
1787 1801 n = " ".join(map(hex, nodes))
1788 1802 d = self.do_cmd("branches", nodes=n).read()
1789 1803 try:
1790 1804 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1791 1805 return br
1792 1806 except:
1793 1807 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1794 1808 raise
1795 1809
1796 1810 def between(self, pairs):
1797 1811 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1798 1812 d = self.do_cmd("between", pairs=n).read()
1799 1813 try:
1800 1814 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1801 1815 return p
1802 1816 except:
1803 1817 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1804 1818 raise
1805 1819
1806 1820 def changegroup(self, nodes):
1807 1821 n = " ".join(map(hex, nodes))
1808 1822 f = self.do_cmd("changegroup", roots=n)
1809 1823 bytes = 0
1810 1824
1811 1825 class zread:
1812 1826 def __init__(self, f):
1813 1827 self.zd = zlib.decompressobj()
1814 1828 self.f = f
1815 1829 self.buf = ""
1816 1830 def read(self, l):
1817 1831 while l > len(self.buf):
1818 1832 r = self.f.read(4096)
1819 1833 if r:
1820 1834 self.buf += self.zd.decompress(r)
1821 1835 else:
1822 1836 self.buf += self.zd.flush()
1823 1837 break
1824 1838 d, self.buf = self.buf[:l], self.buf[l:]
1825 1839 return d
1826 1840
1827 1841 return zread(f)
1828 1842
1829 1843 class remotelock:
1830 1844 def __init__(self, repo):
1831 1845 self.repo = repo
1832 1846 def release(self):
1833 1847 self.repo.unlock()
1834 1848 self.repo = None
1835 1849 def __del__(self):
1836 1850 if self.repo:
1837 1851 self.release()
1838 1852
1839 1853 class sshrepository:
1840 1854 def __init__(self, ui, path):
1841 1855 self.url = path
1842 1856 self.ui = ui
1843 1857
1844 1858 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
1845 1859 if not m:
1846 1860 raise RepoError("couldn't parse destination %s\n" % path)
1847 1861
1848 1862 self.user = m.group(2)
1849 1863 self.host = m.group(3)
1850 1864 self.port = m.group(5)
1851 1865 self.path = m.group(7)
1852 1866
1853 1867 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
1854 1868 args = self.port and ("%s -p %s") % (args, self.port) or args
1855 1869 path = self.path or ""
1856 1870
1857 1871 cmd = "ssh %s 'hg -R %s serve --stdio'"
1858 1872 cmd = cmd % (args, path)
1859 1873
1860 1874 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
1861 1875
1862 1876 def readerr(self):
1863 1877 while 1:
1864 1878 r,w,x = select.select([self.pipee], [], [], 0)
1865 1879 if not r: break
1866 1880 l = self.pipee.readline()
1867 1881 if not l: break
1868 1882 self.ui.status("remote: ", l)
1869 1883
1870 1884 def __del__(self):
1871 1885 self.pipeo.close()
1872 1886 self.pipei.close()
1873 1887 for l in self.pipee:
1874 1888 self.ui.status("remote: ", l)
1875 1889 self.pipee.close()
1876 1890
1877 1891 def dev(self):
1878 1892 return -1
1879 1893
1880 1894 def do_cmd(self, cmd, **args):
1881 1895 self.ui.debug("sending %s command\n" % cmd)
1882 1896 self.pipeo.write("%s\n" % cmd)
1883 1897 for k, v in args.items():
1884 1898 self.pipeo.write("%s %d\n" % (k, len(v)))
1885 1899 self.pipeo.write(v)
1886 1900 self.pipeo.flush()
1887 1901
1888 1902 return self.pipei
1889 1903
1890 1904 def call(self, cmd, **args):
1891 1905 r = self.do_cmd(cmd, **args)
1892 1906 l = r.readline()
1893 1907 self.readerr()
1894 1908 try:
1895 1909 l = int(l)
1896 1910 except:
1897 1911 raise RepoError("unexpected response '%s'" % l)
1898 1912 return r.read(l)
1899 1913
1900 1914 def lock(self):
1901 1915 self.call("lock")
1902 1916 return remotelock(self)
1903 1917
1904 1918 def unlock(self):
1905 1919 self.call("unlock")
1906 1920
1907 1921 def heads(self):
1908 1922 d = self.call("heads")
1909 1923 try:
1910 1924 return map(bin, d[:-1].split(" "))
1911 1925 except:
1912 1926 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1913 1927
1914 1928 def branches(self, nodes):
1915 1929 n = " ".join(map(hex, nodes))
1916 1930 d = self.call("branches", nodes=n)
1917 1931 try:
1918 1932 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1919 1933 return br
1920 1934 except:
1921 1935 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1922 1936
1923 1937 def between(self, pairs):
1924 1938 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1925 1939 d = self.call("between", pairs=n)
1926 1940 try:
1927 1941 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1928 1942 return p
1929 1943 except:
1930 1944 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1931 1945
1932 1946 def changegroup(self, nodes):
1933 1947 n = " ".join(map(hex, nodes))
1934 1948 f = self.do_cmd("changegroup", roots=n)
1935 1949 return self.pipei
1936 1950
1937 1951 def addchangegroup(self, cg):
1938 1952 d = self.call("addchangegroup")
1939 1953 if d:
 1940 1954 raise RepoError("push refused: %s" % d)
1941 1955
1942 1956 while 1:
1943 1957 d = cg.read(4096)
1944 1958 if not d: break
1945 1959 self.pipeo.write(d)
1946 1960 self.readerr()
1947 1961
1948 1962 self.pipeo.flush()
1949 1963
1950 1964 self.readerr()
1951 1965 l = int(self.pipei.readline())
1952 1966 return self.pipei.read(l) != ""
1953 1967
1954 1968 def repository(ui, path=None, create=0):
1955 1969 if path:
1956 1970 if path.startswith("http://"):
1957 1971 return httprepository(ui, path)
1958 1972 if path.startswith("hg://"):
1959 1973 return httprepository(ui, path.replace("hg://", "http://"))
1960 1974 if path.startswith("old-http://"):
1961 1975 return localrepository(ui, path.replace("old-http://", "http://"))
1962 1976 if path.startswith("ssh://"):
1963 1977 return sshrepository(ui, path)
1964 1978
1965 1979 return localrepository(ui, path, create)