Fix zombie files in merge...
maf46@burn.cl.cam.ac.uk
r616:d45d1c90 default
@@ -0,0 +1,46 @@
1 #!/bin/sh -x
2
3 cat <<'EOF' > merge
4 #!/bin/sh
5 echo merging for `basename $1`
6 EOF
7 chmod +x merge
8 export HGMERGE=./merge
9
10 mkdir A1
11 cd A1
12 hg init
13 echo This is file foo1 > foo
14 echo This is file bar1 > bar
15 hg add foo bar
16 hg commit -t "commit text" -d "0 0" -u user
17
18 cd ..
19 hg clone A1 B1
20
21 cd A1
22 rm bar
23 hg remove bar
24 hg commit -t "commit test" -d "0 0" -u user
25
26 cd ../B1
27 echo This is file foo22 > foo
28 hg commit -t "commit test" -d "0 0" -u user
29
30 cd ..
31 hg clone A1 A2
32 hg clone B1 B2
33
34 cd A1
35 hg pull ../B1
36 hg update -m
37 hg commit -t "commit test" -d "0 0" -u user
38 echo bar should remain deleted.
39 hg manifest
40
41 cd ../B2
42 hg pull ../A2
43 hg update -m
44 hg commit -t "commit test" -d "0 0" -u user
45 echo bar should remain deleted.
46 hg manifest
@@ -0,0 +1,53 @@
1 + cat
2 + chmod +x merge
3 + export HGMERGE=./merge
4 + HGMERGE=./merge
5 + mkdir A1
6 + cd A1
7 + hg init
8 + echo This is file foo1
9 + echo This is file bar1
10 + hg add foo bar
11 + hg commit -t 'commit text' -d '0 0' -u user
12 + cd ..
13 + hg clone A1 B1
14 + cd A1
15 + rm bar
16 + hg remove bar
17 + hg commit -t 'commit test' -d '0 0' -u user
18 + cd ../B1
19 + echo This is file foo22
20 + hg commit -t 'commit test' -d '0 0' -u user
21 + cd ..
22 + hg clone A1 A2
23 + hg clone B1 B2
24 + cd A1
25 + hg pull ../B1
26 pulling from ../B1
27 searching for changes
28 adding changesets
29 adding manifests
30 adding file revisions
31 modified 1 files, added 1 changesets and 1 new revisions
32 (run 'hg update' to get a working copy)
33 + hg update -m
34 + hg commit -t 'commit test' -d '0 0' -u user
35 + echo bar should remain deleted.
36 bar should remain deleted.
37 + hg manifest
38 6b70e9e451a5a33faad7bbebe627e46b937b7364 644 foo
39 + cd ../B2
40 + hg pull ../A2
41 pulling from ../A2
42 searching for changes
43 adding changesets
44 adding manifests
45 adding file revisions
46 modified 0 files, added 1 changesets and 0 new revisions
47 (run 'hg update' to get a working copy)
48 + hg update -m
49 + hg commit -t 'commit test' -d '0 0' -u user
50 + echo bar should remain deleted.
51 bar should remain deleted.
52 + hg manifest
53 6b70e9e451a5a33faad7bbebe627e46b937b7364 644 foo
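The transcript above ends with `hg manifest` listing only foo in both repositories, which is the whole point of the test: bar must not reappear after the criss-cross pull and `hg update -m`. The following is a minimal standalone sketch of that check, assuming an `hg` binary on PATH and the A1 and B2 working copies created by the script; the helper name is illustrative, not part of the test suite:

```python
import subprocess

def manifest_files(repo):
    # Run `hg manifest` in the given working copy and return the tracked
    # file names; the old output format is "<hash> <mode> <name>", so we
    # take the last whitespace-separated field of each line.
    out = subprocess.run(["hg", "manifest"], cwd=repo, capture_output=True,
                         text=True, check=True).stdout
    return [line.split()[-1] for line in out.splitlines()]

for repo in ("A1", "B2"):
    files = manifest_files(repo)
    assert "bar" not in files, "zombie file: bar reappeared in %s" % repo
    assert "foo" in files
print("bar stayed deleted in both merge directions")
```

Splitting on whitespace and taking the last field tolerates the `<hash> <mode> <name>` manifest lines shown in the expected output above.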
@@ -1,1558 +1,1560 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff")
14 14
15 15 class filelog(revlog):
16 16 def __init__(self, opener, path):
17 17 revlog.__init__(self, opener,
18 18 os.path.join("data", path + ".i"),
19 19 os.path.join("data", path + ".d"))
20 20
21 21 def read(self, node):
22 22 t = self.revision(node)
23 23 if t[:2] != '\1\n':
24 24 return t
25 25 s = t.find('\1\n', 2)
26 26 return t[s+2:]
27 27
28 28 def readmeta(self, node):
29 29 t = self.revision(node)
30 30 if t[:2] != '\1\n':
31 31 return t
32 32 s = t.find('\1\n', 2)
33 33 mt = t[2:s]
34 34 for l in mt.splitlines():
35 35 k, v = l.split(": ", 1)
36 36 m[k] = v
37 37 return m
38 38
39 39 def add(self, text, meta, transaction, link, p1=None, p2=None):
40 40 if meta or text[:2] == '\1\n':
41 41 mt = ""
42 42 if meta:
43 43 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
44 44 text = "\1\n" + "".join(mt) + "\1\n" + text
45 45 return self.addrevision(text, transaction, link, p1, p2)
46 46
47 47 def annotate(self, node):
48 48
49 49 def decorate(text, rev):
50 50 return ([rev] * len(text.splitlines()), text)
51 51
52 52 def pair(parent, child):
53 53 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
54 54 child[0][b1:b2] = parent[0][a1:a2]
55 55 return child
56 56
57 57 # find all ancestors
58 58 needed = {node:1}
59 59 visit = [node]
60 60 while visit:
61 61 n = visit.pop(0)
62 62 for p in self.parents(n):
63 63 if p not in needed:
64 64 needed[p] = 1
65 65 visit.append(p)
66 66 else:
67 67 # count how many times we'll use this
68 68 needed[p] += 1
69 69
70 70 # sort by revision which is a topological order
71 71 visit = [ (self.rev(n), n) for n in needed.keys() ]
72 72 visit.sort()
73 73 hist = {}
74 74
75 75 for r,n in visit:
76 76 curr = decorate(self.read(n), self.linkrev(n))
77 77 for p in self.parents(n):
78 78 if p != nullid:
79 79 curr = pair(hist[p], curr)
80 80 # trim the history of unneeded revs
81 81 needed[p] -= 1
82 82 if not needed[p]:
83 83 del hist[p]
84 84 hist[n] = curr
85 85
86 86 return zip(hist[n][0], hist[n][1].splitlines(1))
87 87
88 88 class manifest(revlog):
89 89 def __init__(self, opener):
90 90 self.mapcache = None
91 91 self.listcache = None
92 92 self.addlist = None
93 93 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
94 94
95 95 def read(self, node):
96 96 if node == nullid: return {} # don't upset local cache
97 97 if self.mapcache and self.mapcache[0] == node:
98 98 return self.mapcache[1]
99 99 text = self.revision(node)
100 100 map = {}
101 101 flag = {}
102 102 self.listcache = (text, text.splitlines(1))
103 103 for l in self.listcache[1]:
104 104 (f, n) = l.split('\0')
105 105 map[f] = bin(n[:40])
106 106 flag[f] = (n[40:-1] == "x")
107 107 self.mapcache = (node, map, flag)
108 108 return map
109 109
110 110 def readflags(self, node):
111 111 if node == nullid: return {} # don't upset local cache
112 112 if not self.mapcache or self.mapcache[0] != node:
113 113 self.read(node)
114 114 return self.mapcache[2]
115 115
116 116 def diff(self, a, b):
117 117 # this is sneaky, as we're not actually using a and b
118 118 if self.listcache and self.addlist and self.listcache[0] == a:
119 119 d = mdiff.diff(self.listcache[1], self.addlist, 1)
120 120 if mdiff.patch(a, d) != b:
121 121 sys.stderr.write("*** sortdiff failed, falling back ***\n")
122 122 return mdiff.textdiff(a, b)
123 123 return d
124 124 else:
125 125 return mdiff.textdiff(a, b)
126 126
127 127 def add(self, map, flags, transaction, link, p1=None, p2=None):
128 128 files = map.keys()
129 129 files.sort()
130 130
131 131 self.addlist = ["%s\000%s%s\n" %
132 132 (f, hex(map[f]), flags[f] and "x" or '')
133 133 for f in files]
134 134 text = "".join(self.addlist)
135 135
136 136 n = self.addrevision(text, transaction, link, p1, p2)
137 137 self.mapcache = (n, map, flags)
138 138 self.listcache = (text, self.addlist)
139 139 self.addlist = None
140 140
141 141 return n
142 142
143 143 class changelog(revlog):
144 144 def __init__(self, opener):
145 145 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
146 146
147 147 def extract(self, text):
148 148 if not text:
149 149 return (nullid, "", "0", [], "")
150 150 last = text.index("\n\n")
151 151 desc = text[last + 2:]
152 152 l = text[:last].splitlines()
153 153 manifest = bin(l[0])
154 154 user = l[1]
155 155 date = l[2]
156 156 files = l[3:]
157 157 return (manifest, user, date, files, desc)
158 158
159 159 def read(self, node):
160 160 return self.extract(self.revision(node))
161 161
162 162 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
163 163 user=None, date=None):
164 164 date = date or "%d %d" % (time.time(), time.timezone)
165 165 list.sort()
166 166 l = [hex(manifest), user, date] + list + ["", desc]
167 167 text = "\n".join(l)
168 168 return self.addrevision(text, transaction, self.count(), p1, p2)
169 169
170 170 class dirstate:
171 171 def __init__(self, opener, ui, root):
172 172 self.opener = opener
173 173 self.root = root
174 174 self.dirty = 0
175 175 self.ui = ui
176 176 self.map = None
177 177 self.pl = None
178 178 self.copies = {}
179 179
180 180 def __del__(self):
181 181 if self.dirty:
182 182 self.write()
183 183
184 184 def __getitem__(self, key):
185 185 try:
186 186 return self.map[key]
187 187 except TypeError:
188 188 self.read()
189 189 return self[key]
190 190
191 191 def __contains__(self, key):
192 192 if not self.map: self.read()
193 193 return key in self.map
194 194
195 195 def parents(self):
196 196 if not self.pl:
197 197 self.read()
198 198 return self.pl
199 199
200 200 def setparents(self, p1, p2 = nullid):
201 201 self.dirty = 1
202 202 self.pl = p1, p2
203 203
204 204 def state(self, key):
205 205 try:
206 206 return self[key][0]
207 207 except KeyError:
208 208 return "?"
209 209
210 210 def read(self):
211 211 if self.map is not None: return self.map
212 212
213 213 self.map = {}
214 214 self.pl = [nullid, nullid]
215 215 try:
216 216 st = self.opener("dirstate").read()
217 217 if not st: return
218 218 except: return
219 219
220 220 self.pl = [st[:20], st[20: 40]]
221 221
222 222 pos = 40
223 223 while pos < len(st):
224 224 e = struct.unpack(">cllll", st[pos:pos+17])
225 225 l = e[4]
226 226 pos += 17
227 227 f = st[pos:pos + l]
228 228 if '\0' in f:
229 229 f, c = f.split('\0')
230 230 self.copies[f] = c
231 231 self.map[f] = e[:4]
232 232 pos += l
233 233
234 234 def copy(self, source, dest):
235 235 self.read()
236 236 self.dirty = 1
237 237 self.copies[dest] = source
238 238
239 239 def copied(self, file):
240 240 return self.copies.get(file, None)
241 241
242 242 def update(self, files, state):
243 243 ''' current states:
244 244 n normal
245 245 m needs merging
246 246 r marked for removal
247 247 a marked for addition'''
248 248
249 249 if not files: return
250 250 self.read()
251 251 self.dirty = 1
252 252 for f in files:
253 253 if state == "r":
254 254 self.map[f] = ('r', 0, 0, 0)
255 255 else:
256 256 s = os.stat(os.path.join(self.root, f))
257 257 self.map[f] = (state, s.st_mode, s.st_size, s.st_mtime)
258 258
259 259 def forget(self, files):
260 260 if not files: return
261 261 self.read()
262 262 self.dirty = 1
263 263 for f in files:
264 264 try:
265 265 del self.map[f]
266 266 except KeyError:
267 267 self.ui.warn("not in dirstate: %s!\n" % f)
268 268 pass
269 269
270 270 def clear(self):
271 271 self.map = {}
272 272 self.dirty = 1
273 273
274 274 def write(self):
275 275 st = self.opener("dirstate", "w")
276 276 st.write("".join(self.pl))
277 277 for f, e in self.map.items():
278 278 c = self.copied(f)
279 279 if c:
280 280 f = f + "\0" + c
281 281 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
282 282 st.write(e + f)
283 283 self.dirty = 0
284 284
285 285 def changes(self, files, ignore):
286 286 self.read()
287 287 dc = self.map.copy()
288 288 lookup, changed, added, unknown = [], [], [], []
289 289
290 290 # compare all files by default
291 291 if not files: files = [self.root]
292 292
293 293 # recursive generator of all files listed
294 294 def walk(files):
295 295 for f in util.unique(files):
296 296 f = os.path.join(self.root, f)
297 297 if os.path.isdir(f):
298 298 for dir, subdirs, fl in os.walk(f):
299 299 d = dir[len(self.root) + 1:]
300 300 if ".hg" in subdirs: subdirs.remove(".hg")
301 301 for fn in fl:
302 302 fn = util.pconvert(os.path.join(d, fn))
303 303 yield fn
304 304 else:
305 305 yield f[len(self.root) + 1:]
306 306
307 307 for fn in util.unique(walk(files)):
308 308 try: s = os.stat(os.path.join(self.root, fn))
309 309 except: continue
310 310
311 311 if fn in dc:
312 312 c = dc[fn]
313 313 del dc[fn]
314 314
315 315 if c[0] == 'm':
316 316 changed.append(fn)
317 317 elif c[0] == 'a':
318 318 added.append(fn)
319 319 elif c[0] == 'r':
320 320 unknown.append(fn)
321 321 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
322 322 changed.append(fn)
323 323 elif c[1] != s.st_mode or c[3] != s.st_mtime:
324 324 lookup.append(fn)
325 325 else:
326 326 if not ignore(fn): unknown.append(fn)
327 327
328 328 return (lookup, changed, added, dc.keys(), unknown)
329 329
330 330 # used to avoid circular references so destructors work
331 331 def opener(base):
332 332 p = base
333 333 def o(path, mode="r"):
334 334 if p[:7] == "http://":
335 335 f = os.path.join(p, urllib.quote(path))
336 336 return httprangereader.httprangereader(f)
337 337
338 338 f = os.path.join(p, path)
339 339
340 340 mode += "b" # for that other OS
341 341
342 342 if mode[0] != "r":
343 343 try:
344 344 s = os.stat(f)
345 345 except OSError:
346 346 d = os.path.dirname(f)
347 347 if not os.path.isdir(d):
348 348 os.makedirs(d)
349 349 else:
350 350 if s.st_nlink > 1:
351 351 file(f + ".tmp", "wb").write(file(f, "rb").read())
352 352 util.rename(f+".tmp", f)
353 353
354 354 return file(f, mode)
355 355
356 356 return o
357 357
358 358 class RepoError(Exception): pass
359 359
360 360 class localrepository:
361 361 def __init__(self, ui, path=None, create=0):
362 362 self.remote = 0
363 363 if path and path[:7] == "http://":
364 364 self.remote = 1
365 365 self.path = path
366 366 else:
367 367 if not path:
368 368 p = os.getcwd()
369 369 while not os.path.isdir(os.path.join(p, ".hg")):
370 370 oldp = p
371 371 p = os.path.dirname(p)
372 372 if p == oldp: raise RepoError("no repo found")
373 373 path = p
374 374 self.path = os.path.join(path, ".hg")
375 375
376 376 if not create and not os.path.isdir(self.path):
377 377 raise RepoError("repository %s not found" % self.path)
378 378
379 379 self.root = path
380 380 self.ui = ui
381 381
382 382 if create:
383 383 os.mkdir(self.path)
384 384 os.mkdir(self.join("data"))
385 385
386 386 self.opener = opener(self.path)
387 387 self.wopener = opener(self.root)
388 388 self.manifest = manifest(self.opener)
389 389 self.changelog = changelog(self.opener)
390 390 self.ignorelist = None
391 391 self.tagscache = None
392 392 self.nodetagscache = None
393 393
394 394 if not self.remote:
395 395 self.dirstate = dirstate(self.opener, ui, self.root)
396 396 try:
397 397 self.ui.readconfig(self.opener("hgrc"))
398 398 except IOError: pass
399 399
400 400 def ignore(self, f):
401 401 if self.ignorelist is None:
402 402 self.ignorelist = []
403 403 try:
404 404 l = file(self.wjoin(".hgignore"))
405 405 for pat in l:
406 406 if pat != "\n":
407 407 self.ignorelist.append(re.compile(util.pconvert(pat[:-1])))
408 408 except IOError: pass
409 409 for pat in self.ignorelist:
410 410 if pat.search(f): return True
411 411 return False
412 412
413 413 def hook(self, name, **args):
414 414 s = self.ui.config("hooks", name)
415 415 if s:
416 416 self.ui.note("running hook %s: %s\n" % (name, s))
417 417 old = {}
418 418 for k, v in args.items():
419 419 k = k.upper()
420 420 old[k] = os.environ.get(k, None)
421 421 os.environ[k] = v
422 422
423 423 r = os.system(s)
424 424
425 425 for k, v in old.items():
426 426 if v != None:
427 427 os.environ[k] = v
428 428 else:
429 429 del os.environ[k]
430 430
431 431 if r:
432 432 self.ui.warn("abort: %s hook failed with status %d!\n" %
433 433 (name, r))
434 434 return False
435 435 return True
436 436
437 437 def tags(self):
438 438 '''return a mapping of tag to node'''
439 439 if not self.tagscache:
440 440 self.tagscache = {}
441 441 def addtag(self, k, n):
442 442 try:
443 443 bin_n = bin(n)
444 444 except TypeError:
445 445 bin_n = ''
446 446 self.tagscache[k.strip()] = bin_n
447 447
448 448 try:
449 449 # read each head of the tags file, ending with the tip
450 450 # and add each tag found to the map, with "newer" ones
451 451 # taking precedence
452 452 fl = self.file(".hgtags")
453 453 h = fl.heads()
454 454 h.reverse()
455 455 for r in h:
456 456 for l in fl.revision(r).splitlines():
457 457 if l:
458 458 n, k = l.split(" ", 1)
459 459 addtag(self, k, n)
460 460 except KeyError:
461 461 pass
462 462
463 463 try:
464 464 f = self.opener("localtags")
465 465 for l in f:
466 466 n, k = l.split(" ", 1)
467 467 addtag(self, k, n)
468 468 except IOError:
469 469 pass
470 470
471 471 self.tagscache['tip'] = self.changelog.tip()
472 472
473 473 return self.tagscache
474 474
475 475 def tagslist(self):
476 476 '''return a list of tags ordered by revision'''
477 477 l = []
478 478 for t, n in self.tags().items():
479 479 try:
480 480 r = self.changelog.rev(n)
481 481 except:
482 482 r = -2 # sort to the beginning of the list if unknown
483 483 l.append((r,t,n))
484 484 l.sort()
485 485 return [(t,n) for r,t,n in l]
486 486
487 487 def nodetags(self, node):
488 488 '''return the tags associated with a node'''
489 489 if not self.nodetagscache:
490 490 self.nodetagscache = {}
491 491 for t,n in self.tags().items():
492 492 self.nodetagscache.setdefault(n,[]).append(t)
493 493 return self.nodetagscache.get(node, [])
494 494
495 495 def lookup(self, key):
496 496 try:
497 497 return self.tags()[key]
498 498 except KeyError:
499 499 return self.changelog.lookup(key)
500 500
501 501 def join(self, f):
502 502 return os.path.join(self.path, f)
503 503
504 504 def wjoin(self, f):
505 505 return os.path.join(self.root, f)
506 506
507 507 def file(self, f):
508 508 if f[0] == '/': f = f[1:]
509 509 return filelog(self.opener, f)
510 510
511 511 def wfile(self, f, mode='r'):
512 512 return self.wopener(f, mode)
513 513
514 514 def transaction(self):
515 515 # save dirstate for undo
516 516 try:
517 517 ds = self.opener("dirstate").read()
518 518 except IOError:
519 519 ds = ""
520 520 self.opener("undo.dirstate", "w").write(ds)
521 521
522 522 return transaction.transaction(self.ui.warn,
523 523 self.opener, self.join("journal"),
524 524 self.join("undo"))
525 525
526 526 def recover(self):
527 527 lock = self.lock()
528 528 if os.path.exists(self.join("journal")):
529 529 self.ui.status("rolling back interrupted transaction\n")
530 530 return transaction.rollback(self.opener, self.join("journal"))
531 531 else:
532 532 self.ui.warn("no interrupted transaction available\n")
533 533
534 534 def undo(self):
535 535 lock = self.lock()
536 536 if os.path.exists(self.join("undo")):
537 537 self.ui.status("rolling back last transaction\n")
538 538 transaction.rollback(self.opener, self.join("undo"))
539 539 self.dirstate = None
540 540 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
541 541 self.dirstate = dirstate(self.opener, self.ui, self.root)
542 542 else:
543 543 self.ui.warn("no undo information available\n")
544 544
545 545 def lock(self, wait = 1):
546 546 try:
547 547 return lock.lock(self.join("lock"), 0)
548 548 except lock.LockHeld, inst:
549 549 if wait:
550 550 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
551 551 return lock.lock(self.join("lock"), wait)
552 552 raise inst
553 553
554 554 def rawcommit(self, files, text, user, date, p1=None, p2=None):
555 555 orig_parent = self.dirstate.parents()[0] or nullid
556 556 p1 = p1 or self.dirstate.parents()[0] or nullid
557 557 p2 = p2 or self.dirstate.parents()[1] or nullid
558 558 c1 = self.changelog.read(p1)
559 559 c2 = self.changelog.read(p2)
560 560 m1 = self.manifest.read(c1[0])
561 561 mf1 = self.manifest.readflags(c1[0])
562 562 m2 = self.manifest.read(c2[0])
563 563
564 564 if orig_parent == p1:
565 565 update_dirstate = 1
566 566 else:
567 567 update_dirstate = 0
568 568
569 569 tr = self.transaction()
570 570 mm = m1.copy()
571 571 mfm = mf1.copy()
572 572 linkrev = self.changelog.count()
573 573 for f in files:
574 574 try:
575 575 t = self.wfile(f).read()
576 576 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
577 577 r = self.file(f)
578 578 mfm[f] = tm
579 579 mm[f] = r.add(t, {}, tr, linkrev,
580 580 m1.get(f, nullid), m2.get(f, nullid))
581 581 if update_dirstate:
582 582 self.dirstate.update([f], "n")
583 583 except IOError:
584 584 try:
585 585 del mm[f]
586 586 del mfm[f]
587 587 if update_dirstate:
588 588 self.dirstate.forget([f])
589 589 except:
590 590 # deleted from p2?
591 591 pass
592 592
593 593 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
594 594 user = user or self.ui.username()
595 595 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
596 596 tr.close()
597 597 if update_dirstate:
598 598 self.dirstate.setparents(n, nullid)
599 599
600 600 def commit(self, files = None, text = "", user = None, date = None):
601 601 commit = []
602 602 remove = []
603 603 if files:
604 604 for f in files:
605 605 s = self.dirstate.state(f)
606 606 if s in 'nmai':
607 607 commit.append(f)
608 608 elif s == 'r':
609 609 remove.append(f)
610 610 else:
611 611 self.ui.warn("%s not tracked!\n" % f)
612 612 else:
613 613 (c, a, d, u) = self.changes(None, None)
614 614 commit = c + a
615 615 remove = d
616 616
617 617 if not commit and not remove:
618 618 self.ui.status("nothing changed\n")
619 619 return
620 620
621 621 if not self.hook("precommit"):
622 622 return 1
623 623
624 624 p1, p2 = self.dirstate.parents()
625 625 c1 = self.changelog.read(p1)
626 626 c2 = self.changelog.read(p2)
627 627 m1 = self.manifest.read(c1[0])
628 628 mf1 = self.manifest.readflags(c1[0])
629 629 m2 = self.manifest.read(c2[0])
630 630 lock = self.lock()
631 631 tr = self.transaction()
632 632
633 633 # check in files
634 634 new = {}
635 635 linkrev = self.changelog.count()
636 636 commit.sort()
637 637 for f in commit:
638 638 self.ui.note(f + "\n")
639 639 try:
640 640 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
641 641 t = self.wfile(f).read()
642 642 except IOError:
643 643 self.warn("trouble committing %s!\n" % f)
644 644 raise
645 645
646 646 meta = {}
647 647 cp = self.dirstate.copied(f)
648 648 if cp:
649 649 meta["copy"] = cp
650 650 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
651 651 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
652 652
653 653 r = self.file(f)
654 654 fp1 = m1.get(f, nullid)
655 655 fp2 = m2.get(f, nullid)
656 656 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
657 657
658 658 # update manifest
659 659 m1.update(new)
660 660 for f in remove:
661 661 if f in m1:
662 662 del m1[f]
663 663 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0])
664 664
665 665 # add changeset
666 666 new = new.keys()
667 667 new.sort()
668 668
669 669 if not text:
670 670 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
671 671 edittext += "".join(["HG: changed %s\n" % f for f in new])
672 672 edittext += "".join(["HG: removed %s\n" % f for f in remove])
673 673 edittext = self.ui.edit(edittext)
674 674 if not edittext.rstrip():
675 675 return 1
676 676 text = edittext
677 677
678 678 user = user or self.ui.username()
679 679 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
680 680
681 681 if not self.hook("commit", node=hex(n)):
682 682 return 1
683 683
684 684 tr.close()
685 685
686 686 self.dirstate.setparents(n)
687 687 self.dirstate.update(new, "n")
688 688 self.dirstate.forget(remove)
689 689
690 690 def changes(self, node1, node2, files=None):
691 691 mf2, u = None, []
692 692
693 693 def fcmp(fn, mf):
694 694 t1 = self.wfile(fn).read()
695 695 t2 = self.file(fn).revision(mf[fn])
696 696 return cmp(t1, t2)
697 697
698 698 # are we comparing the working directory?
699 699 if not node2:
700 700 l, c, a, d, u = self.dirstate.changes(files, self.ignore)
701 701
702 702 # are we comparing working dir against its parent?
703 703 if not node1:
704 704 if l:
705 705 # do a full compare of any files that might have changed
706 706 change = self.changelog.read(self.dirstate.parents()[0])
707 707 mf2 = self.manifest.read(change[0])
708 708 for f in l:
709 709 if fcmp(f, mf2):
710 710 c.append(f)
711 711
712 712 for l in c, a, d, u:
713 713 l.sort()
714 714
715 715 return (c, a, d, u)
716 716
717 717 # are we comparing working dir against non-tip?
718 718 # generate a pseudo-manifest for the working dir
719 719 if not node2:
720 720 if not mf2:
721 721 change = self.changelog.read(self.dirstate.parents()[0])
722 722 mf2 = self.manifest.read(change[0]).copy()
723 723 for f in a + c + l:
724 724 mf2[f] = ""
725 725 for f in d:
726 726 if f in mf2: del mf2[f]
727 727 else:
728 728 change = self.changelog.read(node2)
729 729 mf2 = self.manifest.read(change[0])
730 730
731 731 # flush lists from dirstate before comparing manifests
732 732 c, a = [], []
733 733
734 734 change = self.changelog.read(node1)
735 735 mf1 = self.manifest.read(change[0]).copy()
736 736
737 737 for fn in mf2:
738 738 if mf1.has_key(fn):
739 739 if mf1[fn] != mf2[fn]:
740 740 if mf2[fn] != "" or fcmp(fn, mf1):
741 741 c.append(fn)
742 742 del mf1[fn]
743 743 else:
744 744 a.append(fn)
745 745
746 746 d = mf1.keys()
747 747
748 748 for l in c, a, d, u:
749 749 l.sort()
750 750
751 751 return (c, a, d, u)
752 752
753 753 def add(self, list):
754 754 for f in list:
755 755 p = self.wjoin(f)
756 756 if not os.path.exists(p):
757 757 self.ui.warn("%s does not exist!\n" % f)
758 758 elif not os.path.isfile(p):
759 759 self.ui.warn("%s not added: mercurial only supports files currently\n" % f)
760 760 elif self.dirstate.state(f) == 'n':
761 761 self.ui.warn("%s already tracked!\n" % f)
762 762 else:
763 763 self.dirstate.update([f], "a")
764 764
765 765 def forget(self, list):
766 766 for f in list:
767 767 if self.dirstate.state(f) not in 'ai':
768 768 self.ui.warn("%s not added!\n" % f)
769 769 else:
770 770 self.dirstate.forget([f])
771 771
772 772 def remove(self, list):
773 773 for f in list:
774 774 p = self.wjoin(f)
775 775 if os.path.exists(p):
776 776 self.ui.warn("%s still exists!\n" % f)
777 777 elif self.dirstate.state(f) == 'a':
778 778 self.ui.warn("%s never committed!\n" % f)
779 779 self.dirstate.forget(f)
780 780 elif f not in self.dirstate:
781 781 self.ui.warn("%s not tracked!\n" % f)
782 782 else:
783 783 self.dirstate.update([f], "r")
784 784
785 785 def copy(self, source, dest):
786 786 p = self.wjoin(dest)
787 787 if not os.path.exists(dest):
788 788 self.ui.warn("%s does not exist!\n" % dest)
789 789 elif not os.path.isfile(dest):
790 790 self.ui.warn("copy failed: %s is not a file\n" % dest)
791 791 else:
792 792 if self.dirstate.state(dest) == '?':
793 793 self.dirstate.update([dest], "a")
794 794 self.dirstate.copy(source, dest)
795 795
796 796 def heads(self):
797 797 return self.changelog.heads()
798 798
799 799 def branches(self, nodes):
800 800 if not nodes: nodes = [self.changelog.tip()]
801 801 b = []
802 802 for n in nodes:
803 803 t = n
804 804 while n:
805 805 p = self.changelog.parents(n)
806 806 if p[1] != nullid or p[0] == nullid:
807 807 b.append((t, n, p[0], p[1]))
808 808 break
809 809 n = p[0]
810 810 return b
811 811
812 812 def between(self, pairs):
813 813 r = []
814 814
815 815 for top, bottom in pairs:
816 816 n, l, i = top, [], 0
817 817 f = 1
818 818
819 819 while n != bottom:
820 820 p = self.changelog.parents(n)[0]
821 821 if i == f:
822 822 l.append(n)
823 823 f = f * 2
824 824 n = p
825 825 i += 1
826 826
827 827 r.append(l)
828 828
829 829 return r
830 830
831 831 def newer(self, nodes):
832 832 m = {}
833 833 nl = []
834 834 pm = {}
835 835 cl = self.changelog
836 836 t = l = cl.count()
837 837
838 838 # find the lowest numbered node
839 839 for n in nodes:
840 840 l = min(l, cl.rev(n))
841 841 m[n] = 1
842 842
843 843 for i in xrange(l, t):
844 844 n = cl.node(i)
845 845 if n in m: # explicitly listed
846 846 pm[n] = 1
847 847 nl.append(n)
848 848 continue
849 849 for p in cl.parents(n):
850 850 if p in pm: # parent listed
851 851 pm[n] = 1
852 852 nl.append(n)
853 853 break
854 854
855 855 return nl
856 856
857 857 def findincoming(self, remote):
858 858 m = self.changelog.nodemap
859 859 search = []
860 860 fetch = []
861 861 base = {}
862 862 seen = {}
863 863 seenbranch = {}
864 864
865 865 # if we have an empty repo, fetch everything
866 866 if self.changelog.tip() == nullid:
867 867 self.ui.status("requesting all changes\n")
868 868 return [nullid]
869 869
870 870 # otherwise, assume we're closer to the tip than the root
871 871 # and start by examining the heads
872 872 self.ui.status("searching for changes\n")
873 873 heads = remote.heads()
874 874 unknown = []
875 875 for h in heads:
876 876 if h not in m:
877 877 unknown.append(h)
878 878
879 879 if not unknown:
880 880 return None
881 881
882 882 rep = {}
883 883 reqcnt = 0
884 884
885 885 # search through remote branches
886 886 # a 'branch' here is a linear segment of history, with four parts:
887 887 # head, root, first parent, second parent
888 888 # (a branch always has two parents (or none) by definition)
889 889 unknown = remote.branches(unknown)
890 890 while unknown:
891 891 r = []
892 892 while unknown:
893 893 n = unknown.pop(0)
894 894 if n[0] in seen:
895 895 continue
896 896
897 897 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
898 898 if n[0] == nullid:
899 899 break
900 900 if n in seenbranch:
901 901 self.ui.debug("branch already found\n")
902 902 continue
903 903 if n[1] and n[1] in m: # do we know the base?
904 904 self.ui.debug("found incomplete branch %s:%s\n"
905 905 % (short(n[0]), short(n[1])))
906 906 search.append(n) # schedule branch range for scanning
907 907 seenbranch[n] = 1
908 908 else:
909 909 if n[1] not in seen and n[1] not in fetch:
910 910 if n[2] in m and n[3] in m:
911 911 self.ui.debug("found new changeset %s\n" %
912 912 short(n[1]))
913 913 fetch.append(n[1]) # earliest unknown
914 914 base[n[2]] = 1 # latest known
915 915 continue
916 916
917 917 for a in n[2:4]:
918 918 if a not in rep:
919 919 r.append(a)
920 920 rep[a] = 1
921 921
922 922 seen[n[0]] = 1
923 923
924 924 if r:
925 925 reqcnt += 1
926 926 self.ui.debug("request %d: %s\n" %
927 927 (reqcnt, " ".join(map(short, r))))
928 928 for p in range(0, len(r), 10):
929 929 for b in remote.branches(r[p:p+10]):
930 930 self.ui.debug("received %s:%s\n" %
931 931 (short(b[0]), short(b[1])))
932 932 if b[0] not in m and b[0] not in seen:
933 933 unknown.append(b)
934 934
935 935 # do binary search on the branches we found
936 936 while search:
937 937 n = search.pop(0)
938 938 reqcnt += 1
939 939 l = remote.between([(n[0], n[1])])[0]
940 940 l.append(n[1])
941 941 p = n[0]
942 942 f = 1
943 943 for i in l:
944 944 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
945 945 if i in m:
946 946 if f <= 2:
947 947 self.ui.debug("found new branch changeset %s\n" %
948 948 short(p))
949 949 fetch.append(p)
950 950 base[i] = 1
951 951 else:
952 952 self.ui.debug("narrowed branch search to %s:%s\n"
953 953 % (short(p), short(i)))
954 954 search.append((p, i))
955 955 break
956 956 p, f = i, f * 2
957 957
958 958 # sanity check our fetch list
959 959 for f in fetch:
960 960 if f in m:
961 961 raise RepoError("already have changeset " + short(f[:4]))
962 962
963 963 if base.keys() == [nullid]:
964 964 self.ui.warn("warning: pulling from an unrelated repository!\n")
965 965
966 966 self.ui.note("adding new changesets starting at " +
967 967 " ".join([short(f) for f in fetch]) + "\n")
968 968
969 969 self.ui.debug("%d total queries\n" % reqcnt)
970 970
971 971 return fetch
972 972
973 973 def changegroup(self, basenodes):
974 974 nodes = self.newer(basenodes)
975 975
976 976 # construct the link map
977 977 linkmap = {}
978 978 for n in nodes:
979 979 linkmap[self.changelog.rev(n)] = n
980 980
981 981 # construct a list of all changed files
982 982 changed = {}
983 983 for n in nodes:
984 984 c = self.changelog.read(n)
985 985 for f in c[3]:
986 986 changed[f] = 1
987 987 changed = changed.keys()
988 988 changed.sort()
989 989
990 990 # the changegroup is changesets + manifests + all file revs
991 991 revs = [ self.changelog.rev(n) for n in nodes ]
992 992
993 993 for y in self.changelog.group(linkmap): yield y
994 994 for y in self.manifest.group(linkmap): yield y
995 995 for f in changed:
996 996 yield struct.pack(">l", len(f) + 4) + f
997 997 g = self.file(f).group(linkmap)
998 998 for y in g:
999 999 yield y
1000 1000
1001 1001 def addchangegroup(self, generator):
1002 1002
1003 1003 class genread:
1004 1004 def __init__(self, generator):
1005 1005 self.g = generator
1006 1006 self.buf = ""
1007 1007 def read(self, l):
1008 1008 while l > len(self.buf):
1009 1009 try:
1010 1010 self.buf += self.g.next()
1011 1011 except StopIteration:
1012 1012 break
1013 1013 d, self.buf = self.buf[:l], self.buf[l:]
1014 1014 return d
1015 1015
1016 1016 def getchunk():
1017 1017 d = source.read(4)
1018 1018 if not d: return ""
1019 1019 l = struct.unpack(">l", d)[0]
1020 1020 if l <= 4: return ""
1021 1021 return source.read(l - 4)
1022 1022
1023 1023 def getgroup():
1024 1024 while 1:
1025 1025 c = getchunk()
1026 1026 if not c: break
1027 1027 yield c
1028 1028
1029 1029 def csmap(x):
1030 1030 self.ui.debug("add changeset %s\n" % short(x))
1031 1031 return self.changelog.count()
1032 1032
1033 1033 def revmap(x):
1034 1034 return self.changelog.rev(x)
1035 1035
1036 1036 if not generator: return
1037 1037 changesets = files = revisions = 0
1038 1038
1039 1039 source = genread(generator)
1040 1040 lock = self.lock()
1041 1041 tr = self.transaction()
1042 1042
1043 1043 # pull off the changeset group
1044 1044 self.ui.status("adding changesets\n")
1045 1045 co = self.changelog.tip()
1046 1046 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1047 1047 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1048 1048
1049 1049 # pull off the manifest group
1050 1050 self.ui.status("adding manifests\n")
1051 1051 mm = self.manifest.tip()
1052 1052 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1053 1053
1054 1054 # process the files
1055 1055 self.ui.status("adding file revisions\n")
1056 1056 while 1:
1057 1057 f = getchunk()
1058 1058 if not f: break
1059 1059 self.ui.debug("adding %s revisions\n" % f)
1060 1060 fl = self.file(f)
1061 1061 o = fl.count()
1062 1062 n = fl.addgroup(getgroup(), revmap, tr)
1063 1063 revisions += fl.count() - o
1064 1064 files += 1
1065 1065
1066 1066 self.ui.status(("modified %d files, added %d changesets" +
1067 1067 " and %d new revisions\n")
1068 1068 % (files, changesets, revisions))
1069 1069
1070 1070 tr.close()
1071 1071 return
1072 1072
1073 1073 def update(self, node, allow=False, force=False, choose=None,
1074 1074 moddirstate=True):
1075 1075 pl = self.dirstate.parents()
1076 1076 if not force and pl[1] != nullid:
1077 1077 self.ui.warn("aborting: outstanding uncommitted merges\n")
1078 1078 return
1079 1079
1080 1080 p1, p2 = pl[0], node
1081 1081 pa = self.changelog.ancestor(p1, p2)
1082 1082 m1n = self.changelog.read(p1)[0]
1083 1083 m2n = self.changelog.read(p2)[0]
1084 1084 man = self.manifest.ancestor(m1n, m2n)
1085 1085 m1 = self.manifest.read(m1n)
1086 1086 mf1 = self.manifest.readflags(m1n)
1087 1087 m2 = self.manifest.read(m2n)
1088 1088 mf2 = self.manifest.readflags(m2n)
1089 1089 ma = self.manifest.read(man)
1090 1090 mfa = self.manifest.readflags(man)
1091 1091
1092 1092 (c, a, d, u) = self.changes(None, None)
1093 1093
1094 1094 # is this a jump, or a merge? i.e. is there a linear path
1095 1095 # from p1 to p2?
1096 1096 linear_path = (pa == p1 or pa == p2)
1097 1097
1098 1098 # resolve the manifest to determine which files
1099 1099 # we care about merging
1100 1100 self.ui.note("resolving manifests\n")
1101 1101 self.ui.debug(" ancestor %s local %s remote %s\n" %
1102 1102 (short(man), short(m1n), short(m2n)))
1103 1103
1104 1104 merge = {}
1105 1105 get = {}
1106 1106 remove = []
1107 1107 mark = {}
1108 1108
1109 1109 # construct a working dir manifest
1110 1110 mw = m1.copy()
1111 1111 mfw = mf1.copy()
1112 1112 umap = dict.fromkeys(u)
1113 1113
1114 1114 for f in a + c + u:
1115 1115 mw[f] = ""
1116 1116 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1117 1117
1118 1118 for f in d:
1119 1119 if f in mw: del mw[f]
1120 1120
1121 1121 # If we're jumping between revisions (as opposed to merging),
1122 1122 # and if neither the working directory nor the target rev has
1123 1123 # the file, then we need to remove it from the dirstate, to
1124 1124 # prevent the dirstate from listing the file when it is no
1125 1125 # longer in the manifest.
1126 1126 if moddirstate and linear_path and f not in m2:
1127 1127 self.dirstate.forget((f,))
1128 1128
1129 1129 # Compare manifests
1130 1130 for f, n in mw.iteritems():
1131 1131 if choose and not choose(f): continue
1132 1132 if f in m2:
1133 1133 s = 0
1134 1134
1135 1135 # is the wfile new since m1, and match m2?
1136 1136 if f not in m1:
1137 1137 t1 = self.wfile(f).read()
1138 1138 t2 = self.file(f).revision(m2[f])
1139 1139 if cmp(t1, t2) == 0:
1140 1140 mark[f] = 1
1141 1141 n = m2[f]
1142 1142 del t1, t2
1143 1143
1144 1144 # are files different?
1145 1145 if n != m2[f]:
1146 1146 a = ma.get(f, nullid)
1147 1147 # are both different from the ancestor?
1148 1148 if n != a and m2[f] != a:
1149 1149 self.ui.debug(" %s versions differ, resolve\n" % f)
1150 1150 # merge executable bits
1151 1151 # "if we changed or they changed, change in merge"
1152 1152 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1153 1153 mode = ((a^b) | (a^c)) ^ a
1154 1154 merge[f] = (m1.get(f, nullid), m2[f], mode)
1155 1155 s = 1
1156 1156 # are we clobbering?
1157 1157 # is remote's version newer?
1158 1158 # or are we going back in time?
1159 1159 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1160 1160 self.ui.debug(" remote %s is newer, get\n" % f)
1161 1161 get[f] = m2[f]
1162 1162 s = 1
1163 1163 else:
1164 1164 mark[f] = 1
1165 1165 elif f in umap:
1166 1166 # this unknown file is the same as the checkout
1167 1167 get[f] = m2[f]
1168 1168
1169 1169 if not s and mfw[f] != mf2[f]:
1170 1170 if force:
1171 1171 self.ui.debug(" updating permissions for %s\n" % f)
1172 1172 util.set_exec(self.wjoin(f), mf2[f])
1173 1173 else:
1174 1174 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1175 1175 mode = ((a^b) | (a^c)) ^ a
1176 1176 if mode != b:
1177 1177 self.ui.debug(" updating permissions for %s\n" % f)
1178 1178 util.set_exec(self.wjoin(f), mode)
1179 1179 mark[f] = 1
1180 1180 del m2[f]
1181 1181 elif f in ma:
1182 if not force and n != ma[f]:
1183 r = ""
1184 if linear_path or allow:
1182 if n != ma[f]:
1183 r = "d"
1184 if not force and (linear_path or allow):
1185 1185 r = self.ui.prompt(
1186 1186 (" local changed %s which remote deleted\n" % f) +
1187 1187 "(k)eep or (d)elete?", "[kd]", "k")
1188 1188 if r == "d":
1189 1189 remove.append(f)
1190 1190 else:
1191 1191 self.ui.debug("other deleted %s\n" % f)
1192 1192 remove.append(f) # other deleted it
1193 1193 else:
1194 1194 if n == m1.get(f, nullid): # same as parent
1195 1195 if p2 == pa: # going backwards?
1196 1196 self.ui.debug("remote deleted %s\n" % f)
1197 1197 remove.append(f)
1198 1198 else:
1199 1199 self.ui.debug("local created %s, keeping\n" % f)
1200 1200 else:
1201 1201 self.ui.debug("working dir created %s, keeping\n" % f)
1202 1202
1203 1203 for f, n in m2.iteritems():
1204 1204 if choose and not choose(f): continue
1205 1205 if f[0] == "/": continue
1206 if not force and f in ma and n != ma[f]:
1207 r = ""
1208 if linear_path or allow:
1206 if f in ma and n != ma[f]:
1207 r = "k"
1208 if not force and (linear_path or allow):
1209 1209 r = self.ui.prompt(
1210 1210 ("remote changed %s which local deleted\n" % f) +
1211 1211 "(k)eep or (d)elete?", "[kd]", "k")
1212 if r == "d": remove.append(f)
1213 else:
1212 if r == "k": get[f] = n
1213 elif f not in ma:
1214 1214 self.ui.debug("remote created %s\n" % f)
1215 1215 get[f] = n
1216 else:
1217 self.ui.debug("local deleted %s\n" % f)
1216 1218
1217 1219 del mw, m1, m2, ma
1218 1220
1219 1221 if force:
1220 1222 for f in merge:
1221 1223 get[f] = merge[f][1]
1222 1224 merge = {}
1223 1225
1224 1226 if linear_path:
1225 1227 # we don't need to do any magic, just jump to the new rev
1226 1228 mode = 'n'
1227 1229 p1, p2 = p2, nullid
1228 1230 else:
1229 1231 if not allow:
1230 1232 self.ui.status("this update spans a branch" +
1231 1233 " affecting the following files:\n")
1232 1234 fl = merge.keys() + get.keys()
1233 1235 fl.sort()
1234 1236 for f in fl:
1235 1237 cf = ""
1236 1238 if f in merge: cf = " (resolve)"
1237 1239 self.ui.status(" %s%s\n" % (f, cf))
1238 1240 self.ui.warn("aborting update spanning branches!\n")
1239 1241 self.ui.status("(use update -m to perform a branch merge)\n")
1240 1242 return 1
1241 1243 # we have to remember what files we needed to get/change
1242 1244 # because any file that's different from either one of its
1243 1245 # parents must be in the changeset
1244 1246 mode = 'm'
1245 1247 if moddirstate:
1246 1248 self.dirstate.update(mark.keys(), "m")
1247 1249
1248 1250 if moddirstate:
1249 1251 self.dirstate.setparents(p1, p2)
1250 1252
1251 1253 # get the files we don't need to change
1252 1254 files = get.keys()
1253 1255 files.sort()
1254 1256 for f in files:
1255 1257 if f[0] == "/": continue
1256 1258 self.ui.note("getting %s\n" % f)
1257 1259 t = self.file(f).read(get[f])
1258 1260 try:
1259 1261 self.wfile(f, "w").write(t)
1260 1262 except IOError:
1261 1263 os.makedirs(os.path.dirname(self.wjoin(f)))
1262 1264 self.wfile(f, "w").write(t)
1263 1265 util.set_exec(self.wjoin(f), mf2[f])
1264 1266 if moddirstate:
1265 1267 self.dirstate.update([f], mode)
1266 1268
1267 1269 # merge the tricky bits
1268 1270 files = merge.keys()
1269 1271 files.sort()
1270 1272 for f in files:
1271 1273 self.ui.status("merging %s\n" % f)
1272 1274 m, o, flag = merge[f]
1273 1275 self.merge3(f, m, o)
1274 1276 util.set_exec(self.wjoin(f), flag)
1275 1277 if moddirstate:
1276 1278 self.dirstate.update([f], 'm')
1277 1279
1278 1280 for f in remove:
1279 1281 self.ui.note("removing %s\n" % f)
1280 1282 os.unlink(f)
1281 1283 # try removing directories that might now be empty
1282 1284 try: os.removedirs(os.path.dirname(f))
1283 1285 except: pass
1284 1286 if moddirstate:
1285 1287 if mode == 'n':
1286 1288 self.dirstate.forget(remove)
1287 1289 else:
1288 1290 self.dirstate.update(remove, 'r')
1289 1291
1290 1292 def merge3(self, fn, my, other):
1291 1293 """perform a 3-way merge in the working directory"""
1292 1294
1293 1295 def temp(prefix, node):
1294 1296 pre = "%s~%s." % (os.path.basename(fn), prefix)
1295 1297 (fd, name) = tempfile.mkstemp("", pre)
1296 1298 f = os.fdopen(fd, "wb")
1297 1299 f.write(fl.revision(node))
1298 1300 f.close()
1299 1301 return name
1300 1302
1301 1303 fl = self.file(fn)
1302 1304 base = fl.ancestor(my, other)
1303 1305 a = self.wjoin(fn)
1304 1306 b = temp("base", base)
1305 1307 c = temp("other", other)
1306 1308
1307 1309 self.ui.note("resolving %s\n" % fn)
1308 1310 self.ui.debug("file %s: other %s ancestor %s\n" %
1309 1311 (fn, short(other), short(base)))
1310 1312
1311 1313 cmd = self.ui.config("ui", "merge") or \
1312 1314 os.environ.get("HGMERGE", "hgmerge")
1313 1315 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1314 1316 if r:
1315 1317 self.ui.warn("merging %s failed!\n" % fn)
1316 1318
1317 1319 os.unlink(b)
1318 1320 os.unlink(c)
1319 1321
1320 1322 def verify(self):
1321 1323 filelinkrevs = {}
1322 1324 filenodes = {}
1323 1325 changesets = revisions = files = 0
1324 1326 errors = 0
1325 1327
1326 1328 seen = {}
1327 1329 self.ui.status("checking changesets\n")
1328 1330 for i in range(self.changelog.count()):
1329 1331 changesets += 1
1330 1332 n = self.changelog.node(i)
1331 1333 if n in seen:
1332 1334 self.ui.warn("duplicate changeset at revision %d\n" % i)
1333 1335 errors += 1
1334 1336 seen[n] = 1
1335 1337
1336 1338 for p in self.changelog.parents(n):
1337 1339 if p not in self.changelog.nodemap:
1338 1340 self.ui.warn("changeset %s has unknown parent %s\n" %
1339 1341 (short(n), short(p)))
1340 1342 errors += 1
1341 1343 try:
1342 1344 changes = self.changelog.read(n)
1343 1345 except Exception, inst:
1344 1346 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1345 1347 errors += 1
1346 1348
1347 1349 for f in changes[3]:
1348 1350 filelinkrevs.setdefault(f, []).append(i)
1349 1351
1350 1352 seen = {}
1351 1353 self.ui.status("checking manifests\n")
1352 1354 for i in range(self.manifest.count()):
1353 1355 n = self.manifest.node(i)
1354 1356 if n in seen:
1355 1357 self.ui.warn("duplicate manifest at revision %d\n" % i)
1356 1358 errors += 1
1357 1359 seen[n] = 1
1358 1360
1359 1361 for p in self.manifest.parents(n):
1360 1362 if p not in self.manifest.nodemap:
1361 1363 self.ui.warn("manifest %s has unknown parent %s\n" %
1362 1364 (short(n), short(p)))
1363 1365 errors += 1
1364 1366
1365 1367 try:
1366 1368 delta = mdiff.patchtext(self.manifest.delta(n))
1367 1369 except KeyboardInterrupt:
1368 1370 self.ui.warn("aborted")
1369 1371 sys.exit(0)
1370 1372 except Exception, inst:
1371 1373 self.ui.warn("unpacking manifest %s: %s\n"
1372 1374 % (short(n), inst))
1373 1375 errors += 1
1374 1376
1375 1377 ff = [ l.split('\0') for l in delta.splitlines() ]
1376 1378 for f, fn in ff:
1377 1379 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1378 1380
1379 1381 self.ui.status("crosschecking files in changesets and manifests\n")
1380 1382 for f in filenodes:
1381 1383 if f not in filelinkrevs:
1382 1384 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1383 1385 errors += 1
1384 1386
1385 1387 for f in filelinkrevs:
1386 1388 if f not in filenodes:
1387 1389 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1388 1390 errors += 1
1389 1391
1390 1392 self.ui.status("checking files\n")
1391 1393 ff = filenodes.keys()
1392 1394 ff.sort()
1393 1395 for f in ff:
1394 1396 if f == "/dev/null": continue
1395 1397 files += 1
1396 1398 fl = self.file(f)
1397 1399 nodes = { nullid: 1 }
1398 1400 seen = {}
1399 1401 for i in range(fl.count()):
1400 1402 revisions += 1
1401 1403 n = fl.node(i)
1402 1404
1403 1405 if n in seen:
1404 1406 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1405 1407 errors += 1
1406 1408
1407 1409 if n not in filenodes[f]:
1408 1410 self.ui.warn("%s: %d:%s not in manifests\n"
1409 1411 % (f, i, short(n)))
1410 1412 errors += 1
1411 1413 else:
1412 1414 del filenodes[f][n]
1413 1415
1414 1416 flr = fl.linkrev(n)
1415 1417 if flr not in filelinkrevs[f]:
1416 1418 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1417 1419 % (f, short(n), fl.linkrev(n)))
1418 1420 errors += 1
1419 1421 else:
1420 1422 filelinkrevs[f].remove(flr)
1421 1423
1422 1424 # verify contents
1423 1425 try:
1424 1426 t = fl.read(n)
1425 1427 except Exception, inst:
1426 1428 self.ui.warn("unpacking file %s %s: %s\n"
1427 1429 % (f, short(n), inst))
1428 1430 errors += 1
1429 1431
1430 1432 # verify parents
1431 1433 (p1, p2) = fl.parents(n)
1432 1434 if p1 not in nodes:
1433 1435 self.ui.warn("file %s:%s unknown parent 1 %s" %
1434 1436 (f, short(n), short(p1)))
1435 1437 errors += 1
1436 1438 if p2 not in nodes:
1437 1439 self.ui.warn("file %s:%s unknown parent 2 %s" %
1438 1440 (f, short(n), short(p1)))
1439 1441 errors += 1
1440 1442 nodes[n] = 1
1441 1443
1442 1444 # cross-check
1443 1445 for node in filenodes[f]:
1444 1446 self.ui.warn("node %s in manifests not in %s\n"
1445 1447 % (hex(n), f))
1446 1448 errors += 1
1447 1449
1448 1450 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1449 1451 (files, changesets, revisions))
1450 1452
1451 1453 if errors:
1452 1454 self.ui.warn("%d integrity errors encountered!\n" % errors)
1453 1455 return 1
1454 1456
1455 1457 class remoterepository:
1456 1458 def __init__(self, ui, path):
1457 1459 self.url = path
1458 1460 self.ui = ui
1459 1461 no_list = [ "localhost", "127.0.0.1" ]
1460 1462 host = ui.config("http_proxy", "host")
1461 1463 if host is None:
1462 1464 host = os.environ.get("http_proxy")
1463 1465 if host and host.startswith('http://'):
1464 1466 host = host[7:]
1465 1467 user = ui.config("http_proxy", "user")
1466 1468 passwd = ui.config("http_proxy", "passwd")
1467 1469 no = ui.config("http_proxy", "no")
1468 1470 if no is None:
1469 1471 no = os.environ.get("no_proxy")
1470 1472 if no:
1471 1473 no_list = no_list + no.split(",")
1472 1474
1473 1475 no_proxy = 0
1474 1476 for h in no_list:
1475 1477 if (path.startswith("http://" + h + "/") or
1476 1478 path.startswith("http://" + h + ":") or
1477 1479 path == "http://" + h):
1478 1480 no_proxy = 1
1479 1481
1480 1482 # Note: urllib2 takes proxy values from the environment and those will
1481 1483 # take precedence
1482 1484 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1483 1485 if os.environ.has_key(env):
1484 1486 del os.environ[env]
1485 1487
1486 1488 proxy_handler = urllib2.BaseHandler()
1487 1489 if host and not no_proxy:
1488 1490 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1489 1491
1490 1492 authinfo = None
1491 1493 if user and passwd:
1492 1494 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1493 1495 passmgr.add_password(None, host, user, passwd)
1494 1496 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1495 1497
1496 1498 opener = urllib2.build_opener(proxy_handler, authinfo)
1497 1499 urllib2.install_opener(opener)
1498 1500
1499 1501 def do_cmd(self, cmd, **args):
1500 1502 self.ui.debug("sending %s command\n" % cmd)
1501 1503 q = {"cmd": cmd}
1502 1504 q.update(args)
1503 1505 qs = urllib.urlencode(q)
1504 1506 cu = "%s?%s" % (self.url, qs)
1505 1507 return urllib2.urlopen(cu)
1506 1508
1507 1509 def heads(self):
1508 1510 d = self.do_cmd("heads").read()
1509 1511 try:
1510 1512 return map(bin, d[:-1].split(" "))
1511 1513 except:
1512 1514 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1513 1515 raise
1514 1516
1515 1517 def branches(self, nodes):
1516 1518 n = " ".join(map(hex, nodes))
1517 1519 d = self.do_cmd("branches", nodes=n).read()
1518 1520 try:
1519 1521 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1520 1522 return br
1521 1523 except:
1522 1524 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1523 1525 raise
1524 1526
1525 1527 def between(self, pairs):
1526 1528 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1527 1529 d = self.do_cmd("between", pairs=n).read()
1528 1530 try:
1529 1531 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1530 1532 return p
1531 1533 except:
1532 1534 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1533 1535 raise
1534 1536
1535 1537 def changegroup(self, nodes):
1536 1538 n = " ".join(map(hex, nodes))
1537 1539 zd = zlib.decompressobj()
1538 1540 f = self.do_cmd("changegroup", roots=n)
1539 1541 bytes = 0
1540 1542 while 1:
1541 1543 d = f.read(4096)
1542 1544 bytes += len(d)
1543 1545 if not d:
1544 1546 yield zd.flush()
1545 1547 break
1546 1548 yield zd.decompress(d)
1547 1549 self.ui.note("%d bytes of data transfered\n" % bytes)
1548 1550
1549 1551 def repository(ui, path=None, create=0):
1550 1552 if path and path[:7] == "http://":
1551 1553 return remoterepository(ui, path)
1552 1554 if path and path[:5] == "hg://":
1553 1555 return remoterepository(ui, path.replace("hg://", "http://"))
1554 1556 if path and path[:11] == "old-http://":
1555 1557 return localrepository(ui, path.replace("old-http://", "http://"))
1556 1558 else:
1557 1559 return localrepository(ui, path, create)
1558 1560
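The substantive change in the hg.py diff above sits in localrepository.update, in the two hunks that handle files recorded in the ancestor manifest ma but deleted on one side. Previously, a file the local side had deleted and the remote side had left untouched fell through to the "remote created" branch and was fetched back into the working copy; that resurrected file is the zombie the new test catches. The first hunk also defaults to deleting a locally changed file that the remote deleted when no prompt can be shown, instead of silently keeping it. The sketch below paraphrases the new decision for the remote-side loop only; the function name, prompt callable, and return convention are assumptions for illustration, not the real hg.py API:

```python
def resolve_remote_only_file(f, n, ma, force, linear_path, allow, prompt):
    """Decide what update does with a file f that the incoming manifest still
    carries (as node n) but the local side no longer tracks.  Sketch only.

    Returns "get" to fetch the remote version, or None to leave it deleted.
    """
    if f in ma and n != ma[f]:
        # Remote modified a file we deleted: keep it by default, asking only
        # when interaction is possible and we are not forcing.
        r = "k"
        if not force and (linear_path or allow):
            r = prompt("remote changed %s which local deleted\n"
                       "(k)eep or (d)elete?" % f, "[kd]", "k")
        return "get" if r == "k" else None
    elif f not in ma:
        # Genuinely new on the remote side: fetch it.
        return "get"
    else:
        # Unchanged since the ancestor and deleted locally: the old code fell
        # through to "remote created" and resurrected it; it now stays deleted.
        return None
```

In the test, bar takes the final branch in A1 (deleted locally, untouched remotely) and so stays out of the merged manifest, while in B2 it is removed by the existing "other deleted" path in the working-copy loop.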
@@ -1,18 +1,19 @@
1 1 + mkdir t
2 2 + cd t
3 3 + hg init
4 4 + echo This is file a1
5 5 + echo This is file b1
6 6 + hg add a b
7 7 + hg commit -t 'commit #0' -d '0 0' -u user
8 8 + echo This is file b22
9 9 + hg commit '-tcomment #1' -d '0 0' -u user
10 10 + hg update 0
11 11 + rm b
12 12 + hg commit -A '-tcomment #2' -d '0 0' -u user
13 13 + yes k
14 14 + hg update 1
15 15 this update spans a branch affecting the following files:
16 b
16 17 aborting update spanning branches!
17 18 (use update -m to perform a branch merge)
18 19 + exit 0