##// END OF EJS Templates
Handle undeletion of files when checking out old revisions...
Matt Mackall -
r680:4b7b79d2 default
parent child Browse files
Show More
@@ -1,1881 +1,1883 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff")
14 14 demandload(globals(), "bisect select")
15 15
class filelog(revlog):
    """Revlog holding the history of a single tracked file.

    A file revision may start with a metadata header (copy/rename
    information) delimited by a pair of "\1\n" markers; read() strips
    it, readmeta() parses it.
    """
    def __init__(self, opener, path):
        revlog.__init__(self, opener,
                        os.path.join("data", path + ".i"),
                        os.path.join("data", path + ".d"))

    def read(self, node):
        """Return the file contents for node, with any metadata
        header stripped."""
        t = self.revision(node)
        if t[:2] != '\1\n':
            return t
        s = t.find('\1\n', 2)
        return t[s+2:]

    def readmeta(self, node):
        """Return the metadata dict for node ({} if none present)."""
        t = self.revision(node)
        if t[:2] != '\1\n':
            # fix: previously returned the raw text here, even though
            # callers expect a mapping
            return {}
        s = t.find('\1\n', 2)
        mt = t[2:s]
        m = {}  # fix: m was never initialized, raising NameError below
        for l in mt.splitlines():
            k, v = l.split(": ", 1)
            m[k] = v
        return m

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision; prepend a "\1\n"-framed metadata header when
        meta is given or the text itself starts with the marker."""
        if meta or text[:2] == '\1\n':
            mt = ""
            if meta:
                mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
            text = "\1\n" + "".join(mt) + "\1\n" + text
        return self.addrevision(text, transaction, link, p1, p2)

    def annotate(self, node):
        """Return [(linkrev, line)] attributing each line of node to the
        revision that introduced it."""

        # pair each line of text with the given revision number
        def decorate(text, rev):
            return ([rev] * len(text.splitlines()), text)

        # propagate line attributions from parent to child across a diff
        def pair(parent, child):
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        # find all ancestors
        needed = {node:1}
        visit = [node]
        while visit:
            n = visit.pop(0)
            for p in self.parents(n):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision which is a topological order
        visit = [ (self.rev(n), n) for n in needed.keys() ]
        visit.sort()
        hist = {}

        for r,n in visit:
            curr = decorate(self.read(n), self.linkrev(n))
            for p in self.parents(n):
                if p != nullid:
                    curr = pair(hist[p], curr)
                # trim the history of unneeded revs
                needed[p] -= 1
                if not needed[p]:
                    del hist[p]
            hist[n] = curr

        # the loop ends on the requested node (it has the highest rev
        # among its own ancestors)
        return zip(hist[n][0], hist[n][1].splitlines(1))
88 88
class manifest(revlog):
    # Revlog of the repository manifest: one "file\0<40-hex-node>[x]\n"
    # line per tracked file.  The last manifest read or written is cached
    # both as a dict (mapcache) and as raw text lines (listcache) so a
    # following add() can be expressed as a cheap delta.
    def __init__(self, opener):
        self.mapcache = None   # (node, {file: filenode}, {file: isexec})
        self.listcache = None  # (text, text.splitlines(1))
        self.addlist = None    # lines being assembled by add()
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        # Return {filename: filenode} for the given manifest node.
        if node == nullid: return {} # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        flag = {}
        self.listcache = (text, text.splitlines(1))
        for l in self.listcache[1]:
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
            # anything between the 40 hex digits and the newline is the
            # executable-bit marker
            flag[f] = (n[40:-1] == "x")
        self.mapcache = (node, map, flag)
        return map

    def readflags(self, node):
        # Return {filename: is_executable} for the given manifest node.
        if node == nullid: return {} # don't upset local cache
        if not self.mapcache or self.mapcache[0] != node:
            self.read(node)  # repopulates mapcache as a side effect
        return self.mapcache[2]

    def diff(self, a, b):
        # this is sneaky, as we're not actually using a and b
        if self.listcache and self.addlist and self.listcache[0] == a:
            d = mdiff.diff(self.listcache[1], self.addlist, 1)
            # verify the fast line-based diff; fall back to a full
            # text diff if it does not reproduce b
            if mdiff.patch(a, d) != b:
                sys.stderr.write("*** sortdiff failed, falling back ***\n")
                return mdiff.textdiff(a, b)
            return d
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, flags, transaction, link, p1=None, p2=None,changed=None):
        # Store a new manifest revision.  When `changed` = (added/modified
        # files, removed files) is supplied and the cache is warm, the new
        # text is produced by patching the cached line list instead of
        # rebuilding and re-diffing the whole manifest.

        # directly generate the mdiff delta from the data collected during
        # the bisect loop below
        def gendelta(delta):
            i = 0
            result = []
            while i < len(delta):
                start = delta[i][2]
                end = delta[i][3]
                l = delta[i][4]
                if l == None:
                    l = ""
                # merge adjacent/overlapping hunks into one delta record
                while i < len(delta) - 1 and start <= delta[i+1][2] and end >= delta[i+1][2]:
                    if delta[i+1][3] > end:
                        end = delta[i+1][3]
                    if delta[i+1][4]:
                        l += delta[i+1][4]
                    i += 1
                result.append(struct.pack(">lll", start, end, len(l)) + l)
                i += 1
            return result

        # apply the changes collected during the bisect loop to our addlist
        def addlistdelta(addlist, delta):
            # apply the deltas to the addlist. start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(delta)
            while i > 0:
                i -= 1
                start = delta[i][0]
                end = delta[i][1]
                if delta[i][4]:
                    addlist[start:end] = [delta[i][4]]
                else:
                    del addlist[start:end]
            return addlist

        # calculate the byte offset of the start of each line in the
        # manifest
        def calcoffsets(addlist):
            offsets = [0] * (len(addlist) + 1)
            offset = 0
            i = 0
            while i < len(addlist):
                offsets[i] = offset
                offset += len(addlist[i])
                i += 1
            offsets[i] = offset
            return offsets

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or self.mapcache[0] != p1:
            # slow path: rebuild the full sorted line list from the map
            files = map.keys()
            files.sort()

            self.addlist = ["%s\000%s%s\n" %
                            (f, hex(map[f]), flags[f] and "x" or '')
                            for f in files]
            cachedelta = None
        else:
            # fast path: patch the cached line list with just the changes
            addlist = self.listcache[1]

            # find the starting offset for each line in the add list
            offsets = calcoffsets(addlist)

            # combine the changed lists into one list for sorting
            # (tag 0 = added/modified, tag 1 = removed)
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            bs = 0

            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insertion point
                bs = bisect.bisect(addlist, f, bs)
                if bs < len(addlist):
                    fn = addlist[bs][:addlist[bs].index('\0')]
                else:
                    fn = None
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]), flags[f] and "x" or '')
                else:
                    l = None
                start = bs
                if fn != f:
                    # item not found, insert a new one
                    end = bs
                    if w[1] == 1:
                        sys.stderr.write("failed to remove %s from manifest" % f)
                        sys.exit(1)
                else:
                    # item is found, replace/delete the existing line
                    end = bs + 1
                delta.append([start, end, offsets[start], offsets[end], l])

            self.addlist = addlistdelta(addlist, delta)
            if self.mapcache[0] == self.tip():
                # the revlog can reuse this delta directly against tip
                cachedelta = "".join(gendelta(delta))
            else:
                cachedelta = None

        text = "".join(self.addlist)
        # sanity check: the delta must reproduce the full text exactly
        if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
            sys.stderr.write("manifest delta failure")
            sys.exit(1)
        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self.mapcache = (n, map, flags)
        self.listcache = (text, self.addlist)
        self.addlist = None

        return n
242 242
class changelog(revlog):
    """Revlog of changesets.

    A changeset text is "<manifest hex>\nuser\ndate\nfile...\n\ndescription".
    """
    def __init__(self, opener):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")

    def extract(self, text):
        """Split a changeset text into (manifest, user, date, files, desc)."""
        if not text:
            return (nullid, "", "0", [], "")
        sep = text.index("\n\n")
        description = text[sep + 2:]
        header = text[:sep].splitlines()
        # header layout: manifest node, committer, date, then file list
        return (bin(header[0]), header[1], header[2], header[3:], description)

    def read(self, node):
        """Return the parsed changeset for the given node."""
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None,
            user=None, date=None):
        """Record a new changeset and return its node."""
        if not date:
            date = "%d %d" % (time.time(), time.timezone)
        list.sort()
        parts = [hex(manifest), user, date] + list + ["", desc]
        return self.addrevision("\n".join(parts), transaction,
                                self.count(), p1, p2)
269 269
class dirstate:
    # Tracks the state of the working directory: for each file a tuple
    # (state, mode, size, mtime), the two parent changesets, and any
    # copy sources.  Persisted to the ".hg/dirstate" file.
    def __init__(self, opener, ui, root):
        self.opener = opener
        self.root = root
        self.dirty = 0        # set when in-memory state differs from disk
        self.ui = ui
        self.map = None       # {file: (state, mode, size, mtime)}, lazy-read
        self.pl = None        # [parent1, parent2], lazy-read
        self.copies = {}      # {dest: source}

    def __del__(self):
        # flush pending changes when the object is reclaimed
        if self.dirty:
            self.write()

    def __getitem__(self, key):
        try:
            return self.map[key]
        except TypeError:
            # map is still None: load it, then retry
            self.read()
            return self[key]

    def __contains__(self, key):
        if not self.map: self.read()
        return key in self.map

    def parents(self):
        # return the working directory's parent changeset nodes
        if not self.pl:
            self.read()
        return self.pl

    def setparents(self, p1, p2 = nullid):
        self.dirty = 1
        self.pl = p1, p2

    def state(self, key):
        # single-character state for a file, '?' when untracked
        try:
            return self[key][0]
        except KeyError:
            return "?"

    def read(self):
        # Load the dirstate file: 2 x 20-byte parents, then repeated
        # entries of struct ">cllll" (state, mode, size, mtime, namelen)
        # followed by the file name (possibly "name\0copysource").
        if self.map is not None: return self.map

        self.map = {}
        self.pl = [nullid, nullid]
        try:
            st = self.opener("dirstate").read()
            if not st: return
        except: return  # missing dirstate file means an empty state

        self.pl = [st[:20], st[20: 40]]

        pos = 40
        while pos < len(st):
            e = struct.unpack(">cllll", st[pos:pos+17])
            l = e[4]  # length of the file name that follows
            pos += 17
            f = st[pos:pos + l]
            if '\0' in f:
                # name carries an embedded copy source
                f, c = f.split('\0')
                self.copies[f] = c
            self.map[f] = e[:4]
            pos += l

    def copy(self, source, dest):
        # record that dest was copied from source
        self.read()
        self.dirty = 1
        self.copies[dest] = source

    def copied(self, file):
        # return the copy source for file, or None
        return self.copies.get(file, None)

    def update(self, files, state):
        ''' current states:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition'''

        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            if state == "r":
                # removed files need no stat data
                self.map[f] = ('r', 0, 0, 0)
            else:
                s = os.stat(os.path.join(self.root, f))
                self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)

    def forget(self, files):
        # drop the given files from tracking entirely
        if not files: return
        self.read()
        self.dirty = 1
        for f in files:
            try:
                del self.map[f]
            except KeyError:
                self.ui.warn("not in dirstate: %s!\n" % f)
                pass

    def clear(self):
        self.map = {}
        self.dirty = 1

    def write(self):
        # serialize parents + entries back to ".hg/dirstate"
        st = self.opener("dirstate", "w")
        st.write("".join(self.pl))
        for f, e in self.map.items():
            c = self.copied(f)
            if c:
                # append the copy source after a NUL separator
                f = f + "\0" + c
            e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
            st.write(e + f)
        self.dirty = 0

    def changes(self, files, ignore):
        # Compare the working directory against the dirstate and return
        # (lookup, changed, added, deleted, unknown) file lists; `lookup`
        # holds files whose contents must be compared to decide.
        self.read()
        dc = self.map.copy()
        lookup, changed, added, unknown = [], [], [], []

        # compare all files by default
        if not files: files = [self.root]

        # recursive generator of all files listed
        def walk(files):
            for f in util.unique(files):
                f = os.path.join(self.root, f)
                if os.path.isdir(f):
                    for dir, subdirs, fl in os.walk(f):
                        d = dir[len(self.root) + 1:]
                        for sd in subdirs:
                            # prune ignored directories in place so
                            # os.walk never descends into them
                            if ignore(os.path.join(d, sd +'/')):
                                subdirs.remove(sd)
                        for fn in fl:
                            fn = util.pconvert(os.path.join(d, fn))
                            yield fn
                else:
                    yield f[len(self.root) + 1:]

            # also visit every tracked file so deletions are noticed
            for k in dc.keys():
                yield k

        for fn in util.unique(walk(files)):
            try: s = os.stat(os.path.join(self.root, fn))
            except: continue  # vanished while walking, or tracked but deleted

            if fn in dc:
                c = dc[fn]
                del dc[fn]  # whatever remains in dc at the end is deleted

                if c[0] == 'm':
                    changed.append(fn)
                elif c[0] == 'a':
                    added.append(fn)
                elif c[0] == 'r':
                    unknown.append(fn)
                elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
                    # size or exec bit differs: definitely changed
                    changed.append(fn)
                elif c[1] != s.st_mode or c[3] != s.st_mtime:
                    # stat data is inconclusive: needs a content compare
                    lookup.append(fn)
            else:
                if not ignore(fn): unknown.append(fn)

        return (lookup, changed, added, dc.keys(), unknown)
434 434
# used to avoid circular references so destructors work
def opener(base):
    # Return a function opening paths relative to `base`.  http bases get
    # a range-request reader; local bases get binary-mode files, with
    # parent directories created on write and hardlinks broken first.
    p = base
    def o(path, mode="r"):
        if p[:7] == "http://":
            f = os.path.join(p, urllib.quote(path))
            return httprangereader.httprangereader(f)

        f = os.path.join(p, path)

        mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                s = os.stat(f)
            except OSError:
                # target missing: make sure its directory exists
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    os.makedirs(d)
            else:
                if s.st_nlink > 1:
                    # break up hardlinks (copy-then-rename) so writing
                    # here cannot corrupt another linked repository
                    file(f + ".tmp", "wb").write(file(f, "rb").read())
                    util.rename(f+".tmp", f)

        return file(f, mode)

    return o
462 462
463 463 class RepoError(Exception): pass
464 464
465 465 class localrepository:
    def __init__(self, ui, path=None, create=0):
        # Open (or with create=1, initialize) a repository.  An http://
        # path gives a read-only remote repo; otherwise, when no path is
        # given, walk up from the cwd looking for a ".hg" directory.
        self.remote = 0
        if path and path[:7] == "http://":
            self.remote = 1
            self.path = path
        else:
            if not path:
                p = os.getcwd()
                while not os.path.isdir(os.path.join(p, ".hg")):
                    oldp = p
                    p = os.path.dirname(p)
                    # dirname() reached the filesystem root without a hit
                    if p == oldp: raise RepoError("no repo found")
                path = p
            self.path = os.path.join(path, ".hg")

            if not create and not os.path.isdir(self.path):
                raise RepoError("repository %s not found" % self.path)

        self.root = path
        self.ui = ui

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        # opener reads/writes under .hg/, wopener under the working dir
        self.opener = opener(self.path)
        self.wopener = opener(self.root)
        self.manifest = manifest(self.opener)
        self.changelog = changelog(self.opener)
        self.ignorefunc = None   # built lazily by ignore()
        self.tagscache = None    # built lazily by tags()
        self.nodetagscache = None

        if not self.remote:
            self.dirstate = dirstate(self.opener, ui, self.root)
            try:
                self.ui.readconfig(self.opener("hgrc"))
            except IOError: pass  # per-repo hgrc is optional
504 504
    def ignore(self, f):
        # Return true if file name `f` matches a pattern in .hgignore.
        # The combined matcher is compiled once and cached.
        if not self.ignorefunc:
            bigpat = ["^.hg/$"]  # always ignore the metadata directory
            try:
                l = file(self.wjoin(".hgignore"))
                for pat in l:
                    if pat != "\n":
                        p = util.pconvert(pat[:-1])
                        try:
                            # compile only to validate the pattern; the
                            # result itself is discarded
                            r = re.compile(p)
                        except:
                            self.ui.warn("ignoring invalid ignore"
                                         + " regular expression '%s'\n" % p)
                        else:
                            bigpat.append(util.pconvert(pat[:-1]))
            except IOError: pass  # no .hgignore file

            # fold all patterns into a single alternation regex
            s = "(?:%s)" % (")|(?:".join(bigpat))
            r = re.compile(s)
            self.ignorefunc = r.search

        return self.ignorefunc(f)
527 527
528 528 def hook(self, name, **args):
529 529 s = self.ui.config("hooks", name)
530 530 if s:
531 531 self.ui.note("running hook %s: %s\n" % (name, s))
532 532 old = {}
533 533 for k, v in args.items():
534 534 k = k.upper()
535 535 old[k] = os.environ.get(k, None)
536 536 os.environ[k] = v
537 537
538 538 r = os.system(s)
539 539
540 540 for k, v in old.items():
541 541 if v != None:
542 542 os.environ[k] = v
543 543 else:
544 544 del os.environ[k]
545 545
546 546 if r:
547 547 self.ui.warn("abort: %s hook failed with status %d!\n" %
548 548 (name, r))
549 549 return False
550 550 return True
551 551
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            # helper: record one tag, tolerating a malformed node hash
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.revision(r).splitlines():
                        if l:
                            # each line is "<hex node> <tag name>"
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                pass  # no .hgtags file in this repository

            try:
                # uncommitted local tags override committed ones
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' is always defined and always wins
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
589 589
590 590 def tagslist(self):
591 591 '''return a list of tags ordered by revision'''
592 592 l = []
593 593 for t, n in self.tags().items():
594 594 try:
595 595 r = self.changelog.rev(n)
596 596 except:
597 597 r = -2 # sort to the beginning of the list if unknown
598 598 l.append((r,t,n))
599 599 l.sort()
600 600 return [(t,n) for r,t,n in l]
601 601
602 602 def nodetags(self, node):
603 603 '''return the tags associated with a node'''
604 604 if not self.nodetagscache:
605 605 self.nodetagscache = {}
606 606 for t,n in self.tags().items():
607 607 self.nodetagscache.setdefault(n,[]).append(t)
608 608 return self.nodetagscache.get(node, [])
609 609
    def lookup(self, key):
        # Resolve `key` (tag name, hex prefix, rev number, ...) to a node,
        # trying tags first, then the changelog's own lookup.
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # NOTE(review): bare except also hides unrelated errors
                raise RepoError("unknown revision '%s'" % key)
618 618
619 619 def dev(self):
620 620 if self.remote: return -1
621 621 return os.stat(self.path).st_dev
622 622
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
625 625
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
628 628
    def file(self, f):
        # return the filelog for tracked file f (leading '/' stripped)
        if f[0] == '/': f = f[1:]
        return filelog(self.opener, f)
632 632
633 633 def getcwd(self):
634 634 cwd = os.getcwd()
635 635 if cwd == self.root: return ''
636 636 return cwd[len(self.root) + 1:]
637 637
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
640 640
    def transaction(self):
        # Start a journaled transaction; also snapshot the dirstate so
        # "undo" can restore it later.
        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""  # no dirstate yet (fresh repo)
        self.opener("undo.dirstate", "w").write(ds)

        return transaction.transaction(self.ui.warn,
                                       self.opener, self.join("journal"),
                                       self.join("undo"))
652 652
    def recover(self):
        # Roll back a transaction that was interrupted mid-write, using
        # the journal left behind.
        lock = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status("rolling back interrupted transaction\n")
            return transaction.rollback(self.opener, self.join("journal"))
        else:
            self.ui.warn("no interrupted transaction available\n")
660 660
    def undo(self):
        # Roll back the last completed transaction and restore the
        # dirstate snapshot taken when it started.
        lock = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status("rolling back last transaction\n")
            transaction.rollback(self.opener, self.join("undo"))
            self.dirstate = None  # drop before replacing the file on disk
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.dirstate = dirstate(self.opener, self.ui, self.root)
        else:
            self.ui.warn("no undo information available\n")
671 671
    def lock(self, wait = 1):
        # Acquire the repository lock; with wait true, block (after a
        # warning) until the current holder releases it.
        try:
            return lock.lock(self.join("lock"), 0)
        except lock.LockHeld, inst:
            if wait:
                self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
                return lock.lock(self.join("lock"), wait)
            raise inst
680 680
    def rawcommit(self, files, text, user, date, p1=None, p2=None):
        # Commit with explicitly supplied parents/user/date (used by
        # import/rawcommit).  The dirstate is only updated when the
        # commit's first parent is the current working dir parent.
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wfile(f).read()
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm
                mm[f] = r.add(t, {}, tr, linkrev,
                              m1.get(f, nullid), m2.get(f, nullid))
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is gone from the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
726 726
727 727 def commit(self, files = None, text = "", user = None, date = None):
728 728 commit = []
729 729 remove = []
730 730 if files:
731 731 for f in files:
732 732 s = self.dirstate.state(f)
733 733 if s in 'nmai':
734 734 commit.append(f)
735 735 elif s == 'r':
736 736 remove.append(f)
737 737 else:
738 738 self.ui.warn("%s not tracked!\n" % f)
739 739 else:
740 740 (c, a, d, u) = self.changes(None, None)
741 741 commit = c + a
742 742 remove = d
743 743
744 744 if not commit and not remove:
745 745 self.ui.status("nothing changed\n")
746 746 return
747 747
748 748 if not self.hook("precommit"):
749 749 return 1
750 750
751 751 p1, p2 = self.dirstate.parents()
752 752 c1 = self.changelog.read(p1)
753 753 c2 = self.changelog.read(p2)
754 754 m1 = self.manifest.read(c1[0])
755 755 mf1 = self.manifest.readflags(c1[0])
756 756 m2 = self.manifest.read(c2[0])
757 757 lock = self.lock()
758 758 tr = self.transaction()
759 759
760 760 # check in files
761 761 new = {}
762 762 linkrev = self.changelog.count()
763 763 commit.sort()
764 764 for f in commit:
765 765 self.ui.note(f + "\n")
766 766 try:
767 767 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
768 768 t = self.wfile(f).read()
769 769 except IOError:
770 770 self.ui.warn("trouble committing %s!\n" % f)
771 771 raise
772 772
773 773 meta = {}
774 774 cp = self.dirstate.copied(f)
775 775 if cp:
776 776 meta["copy"] = cp
777 777 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
778 778 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
779 779
780 780 r = self.file(f)
781 781 fp1 = m1.get(f, nullid)
782 782 fp2 = m2.get(f, nullid)
783 783 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
784 784
785 785 # update manifest
786 786 m1.update(new)
787 787 for f in remove:
788 788 if f in m1:
789 789 del m1[f]
790 790 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0], (new,remove))
791 791
792 792 # add changeset
793 793 new = new.keys()
794 794 new.sort()
795 795
796 796 if not text:
797 797 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
798 798 edittext += "".join(["HG: changed %s\n" % f for f in new])
799 799 edittext += "".join(["HG: removed %s\n" % f for f in remove])
800 800 edittext = self.ui.edit(edittext)
801 801 if not edittext.rstrip():
802 802 return 1
803 803 text = edittext
804 804
805 805 user = user or self.ui.username()
806 806 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
807 807
808 808 tr.close()
809 809
810 810 self.dirstate.setparents(n)
811 811 self.dirstate.update(new, "n")
812 812 self.dirstate.forget(remove)
813 813
814 814 if not self.hook("commit", node=hex(n)):
815 815 return 1
816 816
    def changes(self, node1, node2, files=None):
        # Return (changed, added, deleted, unknown) file lists between
        # two revisions; either node may be None, meaning the working
        # directory.
        mf2, u = None, []

        # content comparison between working file and stored revision
        def fcmp(fn, mf):
            t1 = self.wfile(fn).read()
            t2 = self.file(fn).revision(mf[fn])
            return cmp(t1, t2)

        # are we comparing the working directory?
        if not node2:
            l, c, a, d, u = self.dirstate.changes(files, self.ignore)

            # are we comparing working dir against its parent?
            if not node1:
                if l:
                    # do a full compare of any files that might have changed
                    change = self.changelog.read(self.dirstate.parents()[0])
                    mf2 = self.manifest.read(change[0])
                    for f in l:
                        if fcmp(f, mf2):
                            c.append(f)

                for l in c, a, d, u:
                    l.sort()

                return (c, a, d, u)

        # are we comparing working dir against non-tip?
        # generate a pseudo-manifest for the working dir
        if not node2:
            if not mf2:
                change = self.changelog.read(self.dirstate.parents()[0])
                mf2 = self.manifest.read(change[0]).copy()
            # empty node "" marks files whose content must be compared
            for f in a + c + l:
                mf2[f] = ""
            for f in d:
                if f in mf2: del mf2[f]
        else:
            change = self.changelog.read(node2)
            mf2 = self.manifest.read(change[0])

        # flush lists from dirstate before comparing manifests
        c, a = [], []

        change = self.changelog.read(node1)
        mf1 = self.manifest.read(change[0]).copy()

        for fn in mf2:
            if mf1.has_key(fn):
                if mf1[fn] != mf2[fn]:
                    if mf2[fn] != "" or fcmp(fn, mf1):
                        c.append(fn)
                del mf1[fn]
            else:
                a.append(fn)

        # whatever is left in mf1 existed only in node1: deleted
        d = mf1.keys()

        for l in c, a, d, u:
            l.sort()

        return (c, a, d, u)
879 879
880 880 def add(self, list):
881 881 for f in list:
882 882 p = self.wjoin(f)
883 883 if not os.path.exists(p):
884 884 self.ui.warn("%s does not exist!\n" % f)
885 885 elif not os.path.isfile(p):
886 886 self.ui.warn("%s not added: mercurial only supports files currently\n" % f)
887 887 elif self.dirstate.state(f) == 'n':
888 888 self.ui.warn("%s already tracked!\n" % f)
889 889 else:
890 890 self.dirstate.update([f], "a")
891 891
892 892 def forget(self, list):
893 893 for f in list:
894 894 if self.dirstate.state(f) not in 'ai':
895 895 self.ui.warn("%s not added!\n" % f)
896 896 else:
897 897 self.dirstate.forget([f])
898 898
    def remove(self, list):
        # Schedule files for removal at the next commit; the file must
        # already be deleted from the working directory.
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn("%s still exists!\n" % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.ui.warn("%s never committed!\n" % f)
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn("%s not tracked!\n" % f)
            else:
                self.dirstate.update([f], "r")
911 911
912 912 def copy(self, source, dest):
913 913 p = self.wjoin(dest)
914 914 if not os.path.exists(dest):
915 915 self.ui.warn("%s does not exist!\n" % dest)
916 916 elif not os.path.isfile(dest):
917 917 self.ui.warn("copy failed: %s is not a file\n" % dest)
918 918 else:
919 919 if self.dirstate.state(dest) == '?':
920 920 self.dirstate.update([dest], "a")
921 921 self.dirstate.copy(source, dest)
922 922
    def heads(self):
        # return the changelog's current head nodes
        return self.changelog.heads()
925 925
926 926 def branches(self, nodes):
927 927 if not nodes: nodes = [self.changelog.tip()]
928 928 b = []
929 929 for n in nodes:
930 930 t = n
931 931 while n:
932 932 p = self.changelog.parents(n)
933 933 if p[1] != nullid or p[0] == nullid:
934 934 b.append((t, n, p[0], p[1]))
935 935 break
936 936 n = p[0]
937 937 return b
938 938
    def between(self, pairs):
        # For each (top, bottom) pair, return the nodes at exponentially
        # growing distances (1, 2, 4, ...) along the first-parent chain
        # from top down to bottom; used for binary search over branches.
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1  # next distance at which to sample a node

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
957 957
    def newer(self, nodes):
        # Return all changelog nodes that are descendants of (or equal
        # to) any of the given nodes, in revision order.
        m = {}   # the requested roots
        nl = []  # result list
        pm = {}  # nodes already known to descend from a root
        cl = self.changelog
        t = l = cl.count()

        # find the lowest numbered node
        for n in nodes:
            l = min(l, cl.rev(n))
            m[n] = 1

        # scan forward from there; revision order guarantees parents
        # are seen before children
        for i in xrange(l, t):
            n = cl.node(i)
            if n in m: # explicitly listed
                pm[n] = 1
                nl.append(n)
                continue
            for p in cl.parents(n):
                if p in pm: # parent listed
                    pm[n] = 1
                    nl.append(n)
                    break

        return nl
983 983
984 984 def findincoming(self, remote, base={}):
985 985 m = self.changelog.nodemap
986 986 search = []
987 987 fetch = []
988 988 seen = {}
989 989 seenbranch = {}
990 990
991 991 # assume we're closer to the tip than the root
992 992 # and start by examining the heads
993 993 self.ui.status("searching for changes\n")
994 994 heads = remote.heads()
995 995 unknown = []
996 996 for h in heads:
997 997 if h not in m:
998 998 unknown.append(h)
999 999 else:
1000 1000 base[h] = 1
1001 1001
1002 1002 if not unknown:
1003 1003 return None
1004 1004
1005 1005 rep = {}
1006 1006 reqcnt = 0
1007 1007
1008 1008 # search through remote branches
1009 1009 # a 'branch' here is a linear segment of history, with four parts:
1010 1010 # head, root, first parent, second parent
1011 1011 # (a branch always has two parents (or none) by definition)
1012 1012 unknown = remote.branches(unknown)
1013 1013 while unknown:
1014 1014 r = []
1015 1015 while unknown:
1016 1016 n = unknown.pop(0)
1017 1017 if n[0] in seen:
1018 1018 continue
1019 1019
1020 1020 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1021 1021 if n[0] == nullid:
1022 1022 break
1023 1023 if n in seenbranch:
1024 1024 self.ui.debug("branch already found\n")
1025 1025 continue
1026 1026 if n[1] and n[1] in m: # do we know the base?
1027 1027 self.ui.debug("found incomplete branch %s:%s\n"
1028 1028 % (short(n[0]), short(n[1])))
1029 1029 search.append(n) # schedule branch range for scanning
1030 1030 seenbranch[n] = 1
1031 1031 else:
1032 1032 if n[1] not in seen and n[1] not in fetch:
1033 1033 if n[2] in m and n[3] in m:
1034 1034 self.ui.debug("found new changeset %s\n" %
1035 1035 short(n[1]))
1036 1036 fetch.append(n[1]) # earliest unknown
1037 1037 base[n[2]] = 1 # latest known
1038 1038 continue
1039 1039
1040 1040 for a in n[2:4]:
1041 1041 if a not in rep:
1042 1042 r.append(a)
1043 1043 rep[a] = 1
1044 1044
1045 1045 seen[n[0]] = 1
1046 1046
1047 1047 if r:
1048 1048 reqcnt += 1
1049 1049 self.ui.debug("request %d: %s\n" %
1050 1050 (reqcnt, " ".join(map(short, r))))
1051 1051 for p in range(0, len(r), 10):
1052 1052 for b in remote.branches(r[p:p+10]):
1053 1053 self.ui.debug("received %s:%s\n" %
1054 1054 (short(b[0]), short(b[1])))
1055 1055 if b[0] not in m and b[0] not in seen:
1056 1056 unknown.append(b)
1057 1057
1058 1058 # do binary search on the branches we found
1059 1059 while search:
1060 1060 n = search.pop(0)
1061 1061 reqcnt += 1
1062 1062 l = remote.between([(n[0], n[1])])[0]
1063 1063 l.append(n[1])
1064 1064 p = n[0]
1065 1065 f = 1
1066 1066 for i in l:
1067 1067 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1068 1068 if i in m:
1069 1069 if f <= 2:
1070 1070 self.ui.debug("found new branch changeset %s\n" %
1071 1071 short(p))
1072 1072 fetch.append(p)
1073 1073 base[i] = 1
1074 1074 else:
1075 1075 self.ui.debug("narrowed branch search to %s:%s\n"
1076 1076 % (short(p), short(i)))
1077 1077 search.append((p, i))
1078 1078 break
1079 1079 p, f = i, f * 2
1080 1080
1081 1081 # sanity check our fetch list
1082 1082 for f in fetch:
1083 1083 if f in m:
1084 1084 raise RepoError("already have changeset " + short(f[:4]))
1085 1085
1086 1086 if base.keys() == [nullid]:
1087 1087 self.ui.warn("warning: pulling from an unrelated repository!\n")
1088 1088
1089 1089 self.ui.note("adding new changesets starting at " +
1090 1090 " ".join([short(f) for f in fetch]) + "\n")
1091 1091
1092 1092 self.ui.debug("%d total queries\n" % reqcnt)
1093 1093
1094 1094 return fetch
1095 1095
1096 1096 def findoutgoing(self, remote):
1097 1097 base = {}
1098 1098 self.findincoming(remote, base)
1099 1099 remain = dict.fromkeys(self.changelog.nodemap)
1100 1100
1101 1101 # prune everything remote has from the tree
1102 1102 del remain[nullid]
1103 1103 remove = base.keys()
1104 1104 while remove:
1105 1105 n = remove.pop(0)
1106 1106 if n in remain:
1107 1107 del remain[n]
1108 1108 for p in self.changelog.parents(n):
1109 1109 remove.append(p)
1110 1110
1111 1111 # find every node whose parents have been pruned
1112 1112 subset = []
1113 1113 for n in remain:
1114 1114 p1, p2 = self.changelog.parents(n)
1115 1115 if p1 not in remain and p2 not in remain:
1116 1116 subset.append(n)
1117 1117
1118 1118 # this is the set of all roots we have to push
1119 1119 return subset
1120 1120
1121 1121 def pull(self, remote):
1122 1122 lock = self.lock()
1123 1123
1124 1124 # if we have an empty repo, fetch everything
1125 1125 if self.changelog.tip() == nullid:
1126 1126 self.ui.status("requesting all changes\n")
1127 1127 fetch = [nullid]
1128 1128 else:
1129 1129 fetch = self.findincoming(remote)
1130 1130
1131 1131 if not fetch:
1132 1132 self.ui.status("no changes found\n")
1133 1133 return 1
1134 1134
1135 1135 cg = remote.changegroup(fetch)
1136 1136 return self.addchangegroup(cg)
1137 1137
1138 1138 def push(self, remote):
1139 1139 lock = remote.lock()
1140 1140 update = self.findoutgoing(remote)
1141 1141 if not update:
1142 1142 self.ui.status("no changes found\n")
1143 1143 return 1
1144 1144
1145 1145 cg = self.changegroup(update)
1146 1146 return remote.addchangegroup(cg)
1147 1147
    def changegroup(self, basenodes):
        """Return a read()-able stream bundling every changeset, manifest
        and file revision descended from basenodes (the changegroup wire
        format consumed by addchangegroup)."""
        class genread:
            # adapt a string generator into a file-like read(n) interface
            def __init__(self, generator):
                self.g = generator
                self.buf = ""
            def read(self, l):
                # refill the buffer until we can satisfy the request
                # (or the generator is exhausted)
                while l > len(self.buf):
                    try:
                        self.buf += self.g.next()
                    except StopIteration:
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        def gengroup():
            # the set of changesets to send
            nodes = self.newer(basenodes)

            # construct the link map (changelog revision -> node)
            linkmap = {}
            for n in nodes:
                linkmap[self.changelog.rev(n)] = n

            # construct a list of all changed files
            changed = {}
            for n in nodes:
                c = self.changelog.read(n)
                for f in c[3]:
                    changed[f] = 1
            changed = changed.keys()
            changed.sort()

            # the changegroup is changesets + manifests + all file revs
            revs = [ self.changelog.rev(n) for n in nodes ]

            for y in self.changelog.group(linkmap): yield y
            for y in self.manifest.group(linkmap): yield y
            for f in changed:
                # each file group is preceded by its length-prefixed name
                yield struct.pack(">l", len(f) + 4) + f
                g = self.file(f).group(linkmap)
                for y in g:
                    yield y

            # a zero-length chunk terminates the stream
            yield struct.pack(">l", 0)

        return genread(gengroup())
1193 1193
    def addchangegroup(self, source):
        """Consume a changegroup stream from the file-like source and add
        its changesets, manifests and file revisions to this repository
        inside a single transaction."""

        def getchunk():
            # read one length-prefixed chunk; "" signals end of a group
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            return source.read(l - 4)

        def getgroup():
            # yield chunks until the zero-length group terminator
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            # link function for changelog entries: next local revision
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            # link function mapping a changeset node to its local revision
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        changesets = self.changelog.rev(cn) - self.changelog.rev(co)

        # pull off the manifest group
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)
        # NOTE(review): mm and mo are not used after this point

        # process the files
        self.ui.status("adding file revisions\n")
        while 1:
            # each file group is preceded by a chunk holding the filename
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        self.ui.status(("modified %d files, added %d changesets" +
                        " and %d new revisions\n")
                       % (files, changesets, revisions))

        tr.close()
        return
1250 1250
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to changeset node.

        allow       -- permit an update that spans branches (a merge)
        force       -- override safety checks and clobber local state
        choose      -- optional predicate limiting which files to touch
        moddirstate -- if False, leave the dirstate untouched

        Returns None on success, 1 on a refused cross-branch update.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return

        # p1: current working dir parent; p2: the update target
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # (changed, added, deleted, unknown) files in the working dir
        (c, a, d, u) = self.changes(None, None)

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # f -> (my node, other node, flags): needs 3-way merge
        get = {}     # f -> node: fetch this revision into the working dir
        remove = []  # files to delete from the working dir
        mark = {}    # files to mark in the dirstate without touching

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wfile(f).read()
                    t2 = self.file(f).revision(m2[f])
                    if cmp(t1, t2) == 0:
                        mark[f] = 1
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                    else:
                        mark[f] = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                # reconcile executable bits when file contents agree
                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                            mark[f] = 1
                del m2[f]
            elif f in ma:
                # file exists in working dir and ancestor, but not target
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                # file only exists in the working dir / local parent
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # files remaining in m2: present in the target but not locally
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                # file was deleted locally; recreate it when forcing or
                # when checking out an older revision that still has it
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            # forced update: take the other side of every would-be merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path:
            # we don't need to do any magic, just jump to the new rev
            mode = 'n'
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to perform a branch merge)\n")
                return 1
            # we have to remember what files we needed to get/change
            # because any file that's different from either one of its
            # parents must be in the changeset
            mode = 'm'
            if moddirstate:
                self.dirstate.update(mark.keys(), "m")

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # fetch the files we need from the target revision
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wfile(f, "w").write(t)
            except IOError:
                # missing parent directory: create it and retry
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wfile(f, "w").write(t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                self.dirstate.update([f], mode)

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            m, o, flag = merge[f]
            self.merge3(f, m, o)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                self.dirstate.update([f], 'm')

        for f in remove:
            self.ui.note("removing %s\n" % f)
            os.unlink(f)
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(f))
            except: pass
        if moddirstate:
            if mode == 'n':
                self.dirstate.forget(remove)
            else:
                self.dirstate.update(remove, 'r')
1473 1475
    def merge3(self, fn, my, other):
        """perform a 3-way merge in the working directory

        fn    -- working-directory-relative filename to merge in place
        my    -- filelog node of the local version
        other -- filelog node of the other version

        The merge helper is invoked as: cmd <local> <base> <other>;
        a warning is printed if it exits non-zero.
        """

        def temp(prefix, node):
            # write the given file revision to a temp file for the helper
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            f.write(fl.revision(node))
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note("resolving %s\n" % fn)
        self.ui.debug("file %s: other %s ancestor %s\n" %
                      (fn, short(other), short(base)))

        cmd = self.ui.config("ui", "merge") or \
              os.environ.get("HGMERGE", "hgmerge")
        # NOTE(review): cmd and file paths are interpolated into a shell
        # command without any quoting; filenames containing spaces or
        # shell metacharacters will break or be unsafe
        r = os.system("%s %s %s %s" % (cmd, a, b, c))
        if r:
            self.ui.warn("merging %s failed!\n" % fn)

        # clean up the temporary base/other copies
        os.unlink(b)
        os.unlink(c)
1503 1505
1504 1506 def verify(self):
1505 1507 filelinkrevs = {}
1506 1508 filenodes = {}
1507 1509 changesets = revisions = files = 0
1508 1510 errors = 0
1509 1511
1510 1512 seen = {}
1511 1513 self.ui.status("checking changesets\n")
1512 1514 for i in range(self.changelog.count()):
1513 1515 changesets += 1
1514 1516 n = self.changelog.node(i)
1515 1517 if n in seen:
1516 1518 self.ui.warn("duplicate changeset at revision %d\n" % i)
1517 1519 errors += 1
1518 1520 seen[n] = 1
1519 1521
1520 1522 for p in self.changelog.parents(n):
1521 1523 if p not in self.changelog.nodemap:
1522 1524 self.ui.warn("changeset %s has unknown parent %s\n" %
1523 1525 (short(n), short(p)))
1524 1526 errors += 1
1525 1527 try:
1526 1528 changes = self.changelog.read(n)
1527 1529 except Exception, inst:
1528 1530 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1529 1531 errors += 1
1530 1532
1531 1533 for f in changes[3]:
1532 1534 filelinkrevs.setdefault(f, []).append(i)
1533 1535
1534 1536 seen = {}
1535 1537 self.ui.status("checking manifests\n")
1536 1538 for i in range(self.manifest.count()):
1537 1539 n = self.manifest.node(i)
1538 1540 if n in seen:
1539 1541 self.ui.warn("duplicate manifest at revision %d\n" % i)
1540 1542 errors += 1
1541 1543 seen[n] = 1
1542 1544
1543 1545 for p in self.manifest.parents(n):
1544 1546 if p not in self.manifest.nodemap:
1545 1547 self.ui.warn("manifest %s has unknown parent %s\n" %
1546 1548 (short(n), short(p)))
1547 1549 errors += 1
1548 1550
1549 1551 try:
1550 1552 delta = mdiff.patchtext(self.manifest.delta(n))
1551 1553 except KeyboardInterrupt:
1552 1554 self.ui.warn("aborted")
1553 1555 sys.exit(0)
1554 1556 except Exception, inst:
1555 1557 self.ui.warn("unpacking manifest %s: %s\n"
1556 1558 % (short(n), inst))
1557 1559 errors += 1
1558 1560
1559 1561 ff = [ l.split('\0') for l in delta.splitlines() ]
1560 1562 for f, fn in ff:
1561 1563 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1562 1564
1563 1565 self.ui.status("crosschecking files in changesets and manifests\n")
1564 1566 for f in filenodes:
1565 1567 if f not in filelinkrevs:
1566 1568 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1567 1569 errors += 1
1568 1570
1569 1571 for f in filelinkrevs:
1570 1572 if f not in filenodes:
1571 1573 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1572 1574 errors += 1
1573 1575
1574 1576 self.ui.status("checking files\n")
1575 1577 ff = filenodes.keys()
1576 1578 ff.sort()
1577 1579 for f in ff:
1578 1580 if f == "/dev/null": continue
1579 1581 files += 1
1580 1582 fl = self.file(f)
1581 1583 nodes = { nullid: 1 }
1582 1584 seen = {}
1583 1585 for i in range(fl.count()):
1584 1586 revisions += 1
1585 1587 n = fl.node(i)
1586 1588
1587 1589 if n in seen:
1588 1590 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1589 1591 errors += 1
1590 1592
1591 1593 if n not in filenodes[f]:
1592 1594 self.ui.warn("%s: %d:%s not in manifests\n"
1593 1595 % (f, i, short(n)))
1594 1596 errors += 1
1595 1597 else:
1596 1598 del filenodes[f][n]
1597 1599
1598 1600 flr = fl.linkrev(n)
1599 1601 if flr not in filelinkrevs[f]:
1600 1602 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1601 1603 % (f, short(n), fl.linkrev(n)))
1602 1604 errors += 1
1603 1605 else:
1604 1606 filelinkrevs[f].remove(flr)
1605 1607
1606 1608 # verify contents
1607 1609 try:
1608 1610 t = fl.read(n)
1609 1611 except Exception, inst:
1610 1612 self.ui.warn("unpacking file %s %s: %s\n"
1611 1613 % (f, short(n), inst))
1612 1614 errors += 1
1613 1615
1614 1616 # verify parents
1615 1617 (p1, p2) = fl.parents(n)
1616 1618 if p1 not in nodes:
1617 1619 self.ui.warn("file %s:%s unknown parent 1 %s" %
1618 1620 (f, short(n), short(p1)))
1619 1621 errors += 1
1620 1622 if p2 not in nodes:
1621 1623 self.ui.warn("file %s:%s unknown parent 2 %s" %
1622 1624 (f, short(n), short(p1)))
1623 1625 errors += 1
1624 1626 nodes[n] = 1
1625 1627
1626 1628 # cross-check
1627 1629 for node in filenodes[f]:
1628 1630 self.ui.warn("node %s in manifests not in %s\n"
1629 1631 % (hex(n), f))
1630 1632 errors += 1
1631 1633
1632 1634 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1633 1635 (files, changesets, revisions))
1634 1636
1635 1637 if errors:
1636 1638 self.ui.warn("%d integrity errors encountered!\n" % errors)
1637 1639 return 1
1638 1640
class httprepository:
    """Read-only repository accessed over HTTP via the hgweb command
    interface (cmd=... query strings)."""

    def __init__(self, ui, path):
        self.url = path
        self.ui = ui
        # hosts that must never go through the proxy
        no_list = [ "localhost", "127.0.0.1" ]
        # proxy host: hgrc [http_proxy] section, then the environment
        host = ui.config("http_proxy", "host")
        if host is None:
            host = os.environ.get("http_proxy")
        if host and host.startswith('http://'):
            host = host[7:]
        user = ui.config("http_proxy", "user")
        passwd = ui.config("http_proxy", "passwd")
        no = ui.config("http_proxy", "no")
        if no is None:
            no = os.environ.get("no_proxy")
        if no:
            no_list = no_list + no.split(",")

        # decide whether this particular URL bypasses the proxy
        no_proxy = 0
        for h in no_list:
            if (path.startswith("http://" + h + "/") or
                path.startswith("http://" + h + ":") or
                path == "http://" + h):
                no_proxy = 1

        # Note: urllib2 takes proxy values from the environment and those will
        # take precedence
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            if os.environ.has_key(env):
                del os.environ[env]

        proxy_handler = urllib2.BaseHandler()
        if host and not no_proxy:
            proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})

        authinfo = None
        if user and passwd:
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            authinfo = urllib2.ProxyBasicAuthHandler(passmgr)

        # NOTE(review): installs a process-global opener, affecting all
        # urllib2 users in this process
        opener = urllib2.build_opener(proxy_handler, authinfo)
        urllib2.install_opener(opener)

    def dev(self):
        # remote repos have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Issue an hgweb command as a query string; return the open
        response object."""
        self.ui.debug("sending %s command\n" % cmd)
        q = {"cmd": cmd}
        q.update(args)
        qs = urllib.urlencode(q)
        cu = "%s?%s" % (self.url, qs)
        return urllib2.urlopen(cu)

    def heads(self):
        # remote head nodes, space-separated hex with a trailing newline
        d = self.do_cmd("heads").read()
        try:
            return map(bin, d[:-1].split(" "))
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def branches(self, nodes):
        # one "head tail p1 p2" branch tuple per line
        n = " ".join(map(hex, nodes))
        d = self.do_cmd("branches", nodes=n).read()
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def between(self, pairs):
        # one line of result nodes per requested (top, bottom) pair
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.do_cmd("between", pairs=n).read()
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def changegroup(self, nodes):
        """Request a changegroup rooted at nodes; return a file-like
        object that decompresses the zlib-compressed response."""
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        bytes = 0

        class zread:
            # streaming zlib decompressor with a read(n) interface
            def __init__(self, f):
                self.zd = zlib.decompressobj()
                self.f = f
                self.buf = ""
            def read(self, l):
                while l > len(self.buf):
                    # NOTE(review): reads the enclosing 'f' via closure;
                    # self.f is assigned but never used
                    r = f.read(4096)
                    if r:
                        self.buf += self.zd.decompress(r)
                    else:
                        self.buf += self.zd.flush()
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        return zread(f)
1744 1746
class remotelock:
    """Token representing a lock held on a remote repository.

    Releasing (explicitly or via garbage collection) unlocks the
    remote side exactly once.
    """

    def __init__(self, repo):
        self.repo = repo  # set to None once the lock has been released

    def release(self):
        """Unlock the remote repository and drop our reference to it."""
        self.repo.unlock()
        self.repo = None

    def __del__(self):
        # best effort: release automatically if the caller forgot to
        if self.repo:
            self.release()
1754 1756
class sshrepository:
    """Repository accessed over ssh by driving a remote
    'hg serve --stdio' process through its stdin/stdout/stderr pipes."""

    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        # parse ssh://[user@]host[:port][/path]
        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?', path)
        if not m:
            raise RepoError("couldn't parse destination %s\n" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7)

        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s") % (args, self.port) or args
        path = self.path or ""

        # NOTE(review): args and path are interpolated into a shell
        # command without quoting; hostile values could inject commands
        cmd = "ssh %s 'hg -R %s serve --stdio'"
        cmd = cmd % (args, path)

        # pipeo: remote stdin (we write), pipei: remote stdout (we read),
        # pipee: remote stderr
        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        # drain any pending remote stderr without blocking
        while 1:
            r,w,x = select.select([self.pipee], [], [], 0)
            if not r: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status("remote: ", l)

    def __del__(self):
        # close the pipes, forwarding any remaining remote stderr
        self.pipeo.close()
        self.pipei.close()
        for l in self.pipee:
            self.ui.status("remote: ", l)
        self.pipee.close()

    def dev(self):
        # remote repos have no local device number
        return -1

    def do_cmd(self, cmd, **args):
        """Send a command plus length-prefixed arguments; return the
        input pipe for the caller to read the response from."""
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            # each argument: "<name> <len>\n" followed by the raw value
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """Run a command and return its length-prefixed response payload."""
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        # acquire the remote lock; the token releases it on deletion
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        # remote head nodes, space-separated hex with a trailing newline
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        # one "head tail p1 p2" branch tuple per line
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        # one line of result nodes per requested (top, bottom) pair
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        # fire off the request; the caller streams the result from pipei
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        """Push the changegroup stream cg to the remote repository."""
        d = self.call("addchangegroup")
        if d:
            # NOTE(review): "%s" with a comma argument looks like an
            # unintended format; the message text is never interpolated
            raise RepoError("push refused: %s", d)

        # stream the changegroup in 4k chunks, draining stderr as we go
        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
1869 1871
def repository(ui, path=None, create=0):
    """Return a repository object appropriate for path's URL scheme.

    http:// and hg:// yield an httprepository, ssh:// an
    sshrepository, old-http:// a localrepository over statically
    served files; anything else (including no path) a plain local
    repository, created if create is true.
    """
    if path:
        if path.startswith("http://"):
            return httprepository(ui, path)
        elif path.startswith("hg://"):
            return httprepository(ui, path.replace("hg://", "http://"))
        elif path.startswith("old-http://"):
            return localrepository(ui, path.replace("old-http://", "http://"))
        elif path.startswith("ssh://"):
            return sshrepository(ui, path)
    return localrepository(ui, path, create)
General Comments 0
You need to be logged in to leave comments. Login now