##// END OF EJS Templates
Give more info to the hgmerge script.
Vadim Gelfer
r1883:b98160cf default
parent child Browse files
Show More
@@ -1,179 +1,183
#!/bin/sh
#
# hgmerge - default merge helper for Mercurial
#
# This tries to find a way to do three-way merge on the current system.
# The result ought to end up in $1.
#
# Environment variables set by Mercurial:
# HG_ROOT       repo root
# HG_FILE       name of file within repo
# HG_MY_NODE    revision being merged
# HG_OTHER_NODE revision being merged

set -e # bail out quickly on failure

LOCAL="$1"
BASE="$2"
OTHER="$3"

if [ -z "$EDITOR" ]; then
    EDITOR="vi"
fi

# find decent versions of our utilities, insisting on the GNU versions where we
# need to
MERGE="merge"
DIFF3="gdiff3"
DIFF="gdiff"
PATCH="gpatch"

type "$MERGE" >/dev/null 2>&1 || MERGE=
type "$DIFF3" >/dev/null 2>&1 || DIFF3="diff3"
$DIFF3 --version >/dev/null 2>&1 || DIFF3=
type "$DIFF" >/dev/null 2>&1 || DIFF="diff"
type "$DIFF" >/dev/null 2>&1 || DIFF=
type "$PATCH" >/dev/null 2>&1 || PATCH="patch"
type "$PATCH" >/dev/null 2>&1 || PATCH=

# find optional visual utilities
FILEMERGE="/Developer/Applications/Utilities/FileMerge.app/Contents/MacOS/FileMerge"
KDIFF3="kdiff3"
TKDIFF="tkdiff"
MELD="meld"

type "$FILEMERGE" >/dev/null 2>&1 || FILEMERGE=
type "$KDIFF3" >/dev/null 2>&1 || KDIFF3=
type "$TKDIFF" >/dev/null 2>&1 || TKDIFF=
type "$MELD" >/dev/null 2>&1 || MELD=

# random part of names
RAND="$RANDOM$RANDOM"

# temporary directory for diff+patch merge
HGTMP="${TMPDIR-/tmp}/hgmerge.$RAND"

# backup file
BACKUP="$LOCAL.orig.$RAND"

# file used to test for file change
CHGTEST="$LOCAL.chg.$RAND"

# put all your required cleanup here
cleanup() {
    # FIX: also remove the meld scratch copy, which was previously leaked
    rm -f "$BACKUP" "$CHGTEST" "$LOCAL.tmp.$RAND"
    rm -rf "$HGTMP"
}

# functions concerning program exit
success() {
    cleanup
    exit 0
}

failure() {
    echo "merge failed" 1>&2
    mv "$BACKUP" "$LOCAL"
    cleanup
    exit 1
}

# Ask if the merge was successful
ask_if_merged() {
    while true; do
        echo "$LOCAL seems unchanged."
        echo "Was the merge successful? [y/n]"
        read answer
        case "$answer" in
        y*|Y*) success;;
        n*|N*) failure;;
        esac
    done
}

# Clean up when interrupted
trap "failure" 1 2 3 6 15 # HUP INT QUIT ABRT TERM

# Back up our file (and try hard to keep the mtime unchanged)
mv "$LOCAL" "$BACKUP"
cp "$BACKUP" "$LOCAL"

# Attempt to do a non-interactive merge
if [ -n "$MERGE" -o -n "$DIFF3" ]; then
    if [ -n "$MERGE" ]; then
        $MERGE "$LOCAL" "$BASE" "$OTHER" 2> /dev/null && success
    elif [ -n "$DIFF3" ]; then
        $DIFF3 -m "$BACKUP" "$BASE" "$OTHER" > "$LOCAL" && success
    fi
    # exit status 1 means conflicts (fall through to the interactive
    # tools below); anything greater is a hard error
    if [ $? -gt 1 ]; then
        echo "automatic merge failed! Exiting." 1>&2
        failure
    fi
fi

# on MacOS X try FileMerge.app, shipped with Apple's developer tools
if [ -n "$FILEMERGE" ]; then
    cp "$BACKUP" "$LOCAL"
    cp "$BACKUP" "$CHGTEST"
    # filemerge prefers the right by default
    # FIX: guard the command with ||; under "set -e" a plain failing
    # command would kill the script before failure() could restore the
    # backup, making the old "[ $? -ne 0 ]" check unreachable
    $FILEMERGE -left "$OTHER" -right "$LOCAL" -ancestor "$BASE" -merge "$LOCAL" || {
        echo "FileMerge failed to launch" 1>&2
        failure
    }
    test "$LOCAL" -nt "$CHGTEST" && success || ask_if_merged
fi

if [ -n "$DISPLAY" ]; then
    # try using kdiff3, which is fairly nice
    if [ -n "$KDIFF3" ]; then
        $KDIFF3 --auto "$BASE" "$BACKUP" "$OTHER" -o "$LOCAL" || failure
        success
    fi

    # try using tkdiff, which is a bit less sophisticated
    if [ -n "$TKDIFF" ]; then
        $TKDIFF "$BACKUP" "$OTHER" -a "$BASE" -o "$LOCAL" || failure
        success
    fi

    if [ -n "$MELD" ]; then
        cp "$BACKUP" "$CHGTEST"
        # protect our feet - meld allows us to save to the left file
        cp "$BACKUP" "$LOCAL.tmp.$RAND"
        # Meld doesn't have automatic merging, so to reduce intervention
        # use the file with conflicts
        $MELD "$LOCAL.tmp.$RAND" "$LOCAL" "$OTHER" || failure
        # Also it doesn't return good error code
        test "$LOCAL" -nt "$CHGTEST" && success || ask_if_merged
    fi
fi

# Attempt to do a merge with $EDITOR
if [ -n "$MERGE" -o -n "$DIFF3" ]; then
    echo "conflicts detected in $LOCAL"
    cp "$BACKUP" "$CHGTEST"
    $EDITOR "$LOCAL" || failure
    # Some editors do not return meaningful error codes
    # Do not take any chances
    test "$LOCAL" -nt "$CHGTEST" && success || ask_if_merged
fi

# attempt to manually merge with diff and patch
if [ -n "$DIFF" -a -n "$PATCH" ]; then

    (umask 077 && mkdir "$HGTMP") || {
        echo "Could not create temporary directory $HGTMP" 1>&2
        failure
    }

    $DIFF -u "$BASE" "$OTHER" > "$HGTMP/diff" || :
    if $PATCH "$LOCAL" < "$HGTMP/diff"; then
        success
    else
        # If rejects are empty after using the editor, merge was ok
        $EDITOR "$LOCAL" "$LOCAL.rej" || failure
        test -s "$LOCAL.rej" || success
    fi
    failure
fi

echo
echo "hgmerge: unable to find any merge utility!"
echo "supported programs:"
echo "merge, FileMerge, tkdiff, kdiff3, meld, diff+patch"
echo
failure
@@ -1,1887 +1,1896
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 14
15 15 class localrepository(object):
    def __del__(self):
        # drop the transaction reference on destruction so a pending
        # transaction object does not outlive the repository
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or create) the repository at path.

        If path is None, walk upward from the current directory until
        a .hg directory is found; raise RepoError if none exists.
        """
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.ui = ui.ui(parentui=parentui)
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working dir files
        self.manifest = manifest.manifest(self.opener)
        self.changelog = changelog.changelog(self.opener)
        self.tagscache = None       # lazily built by tags()
        self.nodetagscache = None   # lazily built by nodetags()
        self.encodepats = None      # lazily built by wread()
        self.decodepats = None      # lazily built by wwrite()
        self.transhandle = None     # currently running transaction, if any

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
        try:
            self.ui.readconfig(self.join("hgrc"))
        except IOError:
            # a repository without an hgrc is fine
            pass
53 53
    def hook(self, name, throw=False, **args):
        """Run all configured hooks matching name.

        Keyword arguments are passed to each hook command as HG_-prefixed
        environment variables.  Returns True only if every hook
        succeeded; with throw=True a failing hook raises util.Abort
        instead of returning False.
        """
        def runhook(name, cmd):
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            # export the hook arguments as HG_* environment variables
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('error: %s hook %s\n') % (name, desc))
                return False
            return True

        r = True
        # collect "name" and "name.suffix" entries from [hooks], sorted
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            r = runhook(hname, cmd) and r
        return r
74 74
    def tags(self):
        '''return a mapping of tag to node'''
        if not self.tagscache:
            self.tagscache = {}
            def addtag(self, k, n):
                try:
                    bin_n = bin(n)
                except TypeError:
                    # malformed node hash: record an empty value
                    bin_n = ''
                self.tagscache[k.strip()] = bin_n

            try:
                # read each head of the tags file, ending with the tip
                # and add each tag found to the map, with "newer" ones
                # taking precedence
                fl = self.file(".hgtags")
                h = fl.heads()
                h.reverse()
                for r in h:
                    for l in fl.read(r).splitlines():
                        if l:
                            n, k = l.split(" ", 1)
                            addtag(self, k, n)
            except KeyError:
                # no .hgtags file in this repository
                pass

            try:
                # local (uncommitted) tags override those from .hgtags
                f = self.opener("localtags")
                for l in f:
                    n, k = l.split(" ", 1)
                    addtag(self, k, n)
            except IOError:
                pass

            # 'tip' always refers to the newest revision
            self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
112 112
113 113 def tagslist(self):
114 114 '''return a list of tags ordered by revision'''
115 115 l = []
116 116 for t, n in self.tags().items():
117 117 try:
118 118 r = self.changelog.rev(n)
119 119 except:
120 120 r = -2 # sort to the beginning of the list if unknown
121 121 l.append((r, t, n))
122 122 l.sort()
123 123 return [(t, n) for r, t, n in l]
124 124
125 125 def nodetags(self, node):
126 126 '''return the tags associated with a node'''
127 127 if not self.nodetagscache:
128 128 self.nodetagscache = {}
129 129 for t, n in self.tags().items():
130 130 self.nodetagscache.setdefault(n, []).append(t)
131 131 return self.nodetagscache.get(node, [])
132 132
    def lookup(self, key):
        # map a tag name or any changelog identifier to a binary node;
        # tags take precedence over changelog lookup
        try:
            return self.tags()[key]
        except KeyError:
            try:
                return self.changelog.lookup(key)
            except:
                # any lookup failure is reported uniformly
                raise repo.RepoError(_("unknown revision '%s'") % key)
141 141
    def dev(self):
        # device number of the filesystem holding the .hg directory
        return os.stat(self.path).st_dev

    def local(self):
        # this is a local (on-disk) repository
        return True

    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)

    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)

    def file(self, f):
        # return the filelog for tracked file f, tolerating one leading '/'
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f)

    def getcwd(self):
        # current working directory as reported by the dirstate
        return self.dirstate.getcwd()

    def wfile(self, f, mode='r'):
        # open a file relative to the working directory
        return self.wopener(f, mode)
164 164
165 165 def wread(self, filename):
166 166 if self.encodepats == None:
167 167 l = []
168 168 for pat, cmd in self.ui.configitems("encode"):
169 169 mf = util.matcher("", "/", [pat], [], [])[1]
170 170 l.append((mf, cmd))
171 171 self.encodepats = l
172 172
173 173 data = self.wopener(filename, 'r').read()
174 174
175 175 for mf, cmd in self.encodepats:
176 176 if mf(filename):
177 177 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
178 178 data = util.filter(data, cmd)
179 179 break
180 180
181 181 return data
182 182
183 183 def wwrite(self, filename, data, fd=None):
184 184 if self.decodepats == None:
185 185 l = []
186 186 for pat, cmd in self.ui.configitems("decode"):
187 187 mf = util.matcher("", "/", [pat], [], [])[1]
188 188 l.append((mf, cmd))
189 189 self.decodepats = l
190 190
191 191 for mf, cmd in self.decodepats:
192 192 if mf(filename):
193 193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
194 194 data = util.filter(data, cmd)
195 195 break
196 196
197 197 if fd:
198 198 return fd.write(data)
199 199 return self.wopener(filename, 'w').write(data)
200 200
    def transaction(self):
        # return a new transaction; nest into the running one if any
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
218 218
    def recover(self):
        # roll back an interrupted transaction; return True on success
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
229 229
    def undo(self, wlock=None):
        # roll back the last completed transaction, restoring the
        # dirstate that was saved alongside it
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
242 242
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()

    def reload(self):
        # re-read changelog and manifest, and drop the tag caches
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
251 251
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
        """acquire the named repository lock.

        With wait true, retry with a timeout (ui.timeout config,
        default 600s) when the lock is held elsewhere; otherwise
        re-raise LockHeld immediately.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn)
        except lock.LockHeld, inst:
            if not wait:
                raise inst
            self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
            try:
                # default to 600 seconds timeout
                l = lock.lock(self.join(lockname),
                              int(self.ui.config("ui", "timeout") or 600),
                              releasefn)
            except lock.LockHeld, inst:
                raise util.Abort(_("timeout while waiting for "
                                   "lock held by %s") % inst.args[0])
        if acquirefn:
            acquirefn()
        return l
270 270
    def lock(self, wait=1):
        # store lock: protects changelog/manifest/filelogs
        return self.do_lock("lock", wait, acquirefn=self.reload)

    def wlock(self, wait=1):
        # working directory lock: protects the dirstate
        return self.do_lock("wlock", wait,
                            self.dirstate.write,
                            self.wreload)
278 278
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        # (reuse, p1, p2): no reusable entry, so the caller must add a
        # new filenode with parents fp1/fp2
        return (None, fp1, fp2)
297 297
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """commit the given files directly, bypassing the usual
        working-directory checks.

        Parents default to the dirstate parents; the dirstate is only
        updated when committing onto the current first parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # file unchanged from a parent: reuse that filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working dir: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
353 353
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        """commit outstanding working directory changes.

        With files, commit only those; otherwise commit everything
        accepted by match.  Runs the precommit, pretxncommit and commit
        hooks.  Returns the new changeset node, or None when nothing
        changed or the user abandoned the commit message.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is committed even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record copy source and its revision in the filelog meta
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        if not text:
            # build a commit message template and invoke the editor
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext))
            os.chdir(olddir)
            if not edittext.rstrip():
                # empty message aborts the commit
                return None
            text = edittext

        user = user or self.ui.username()
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
473 473
    def walk(self, node=None, files=[], match=util.always):
        """yield (src, filename) pairs for matched files.

        With node, walk that revision's manifest (src 'm'); otherwise
        walk the working directory via the dirstate.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                # pop so that leftover entries are files absent in node
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                self.ui.warn(_('%s: No such file in rev %s\n') % (
                    util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match):
                yield src, fn
487 487
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown), each a
        sorted list of filenames.
        """

        def fcmp(fn, mf):
            # full content comparison: working copy vs manifest version
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # best effort: proceed without updating the dirstate
                    wlock = None
            lookup, modified, added, removed, deleted, unknown = (
                self.dirstate.changes(files, match))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown = [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever remains in mf1 was not in mf2
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown:
            l.sort()
        return (modified, added, removed, deleted, unknown)
567 567
568 568 def add(self, list, wlock=None):
569 569 if not wlock:
570 570 wlock = self.wlock()
571 571 for f in list:
572 572 p = self.wjoin(f)
573 573 if not os.path.exists(p):
574 574 self.ui.warn(_("%s does not exist!\n") % f)
575 575 elif not os.path.isfile(p):
576 576 self.ui.warn(_("%s not added: only files supported currently\n")
577 577 % f)
578 578 elif self.dirstate.state(f) in 'an':
579 579 self.ui.warn(_("%s already tracked!\n") % f)
580 580 else:
581 581 self.dirstate.update([f], "a")
582 582
583 583 def forget(self, list, wlock=None):
584 584 if not wlock:
585 585 wlock = self.wlock()
586 586 for f in list:
587 587 if self.dirstate.state(f) not in 'ai':
588 588 self.ui.warn(_("%s not added!\n") % f)
589 589 else:
590 590 self.dirstate.forget([f])
591 591
    def remove(self, list, unlink=False, wlock=None):
        """schedule files for removal at the next commit; with unlink,
        also delete them from the working directory first"""
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # a file that is already gone is fine
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # added but never committed: just forget it
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
612 612
613 613 def undelete(self, list, wlock=None):
614 614 p = self.dirstate.parents()[0]
615 615 mn = self.changelog.read(p)[0]
616 616 mf = self.manifest.readflags(mn)
617 617 m = self.manifest.read(mn)
618 618 if not wlock:
619 619 wlock = self.wlock()
620 620 for f in list:
621 621 if self.dirstate.state(f) not in "r":
622 622 self.ui.warn("%s not removed!\n" % f)
623 623 else:
624 624 t = self.file(f).read(m[f])
625 625 self.wwrite(f, t)
626 626 util.set_exec(self.wjoin(f), mf[f])
627 627 self.dirstate.update([f], "n")
628 628
    def copy(self, source, dest, wlock=None):
        """mark dest as a copy of source in the dirstate"""
        p = self.wjoin(dest)
        if not os.path.exists(p):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not os.path.isfile(p):
            self.ui.warn(_("copy failed: %s is not a file\n") % dest)
        else:
            if not wlock:
                wlock = self.wlock()
            if self.dirstate.state(dest) == '?':
                # dest was untracked: schedule it for addition too
                self.dirstate.update([dest], "a")
            self.dirstate.copy(source, dest)
641 641
642 642 def heads(self, start=None):
643 643 heads = self.changelog.heads(start)
644 644 # sort the output in rev descending order
645 645 heads = [(-self.changelog.rev(h), h) for h in heads]
646 646 heads.sort()
647 647 return [n for (r, n) in heads]
648 648
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []     # second-parent branches still to traverse
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        # 'tip' is not a real branch tag
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tagged nodes reachable from node, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
754 754
    def branches(self, nodes):
        # for each node, walk first parents until a merge or the root,
        # returning (head, root, first parent, second parent) tuples
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while n:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
768 768
    def between(self, pairs):
        # for each (top, bottom) pair, return nodes between them sampled
        # at exponentially growing distances from top (1, 2, 4, ...),
        # as used by the discovery binary search in findincoming
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
787 787
    def findincoming(self, remote, base=None, heads=None):
        """return roots of changesets the remote has and we lack.

        base, if supplied, is filled in with nodes both sides know
        (used as a set).  heads restricts the search to those remote
        heads.  Returns None when there is nothing new to pull.
        """
        m = self.changelog.nodemap
        search = []         # incomplete branches to binary-search
        fetch = {}          # earliest unknown nodes (the result)
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return None

        rep = {}
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue the parents for the next batched request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # ask about parents in batches of 10
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
909 909
def findoutgoing(self, remote, base=None, heads=None):
    """Return the roots of the changesets we have that remote lacks.

    base, if given, is a dict of nodes already known to be common; when
    omitted it is computed with findincoming().  heads optionally limits
    the remote heads considered by that computation.
    """
    if base == None:
        base = {}
        self.findincoming(remote, base, heads)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    # start from every node we have, then strip out everything that is
    # an ancestor of (or is) a node the remote side already has
    candidates = dict.fromkeys(self.changelog.nodemap)
    del candidates[nullid]
    pending = base.keys()
    while pending:
        node = pending.pop(0)
        if node in candidates:
            del candidates[node]
            for parent in self.changelog.parents(node):
                pending.append(parent)

    # whatever survived is missing remotely; its roots (nodes with no
    # surviving parent) are the set of everything we have to push
    roots = []
    for node in candidates:
        p1, p2 = self.changelog.parents(node)
        if p1 not in candidates and p2 not in candidates:
            roots.append(node)

    return roots
939 939
def pull(self, remote, heads=None):
    """Pull missing changesets from remote into this repository.

    heads, if given, restricts the pull to ancestors of those remote
    heads.  Returns 1 when no changes are found, otherwise the result
    of addchangegroup().
    """
    # NOTE(review): the repo lock is held only via this reference; it
    # is released when 'l' is garbage-collected at function exit.
    l = self.lock()

    # if we have an empty repo, fetch everything
    if self.changelog.tip() == nullid:
        self.ui.status(_("requesting all changes\n"))
        fetch = [nullid]
    else:
        fetch = self.findincoming(remote)

    if not fetch:
        self.ui.status(_("no changes found\n"))
        return 1

    # with explicit heads we need the subset protocol; otherwise ask
    # for everything descending from the fetch roots
    if heads is None:
        cg = remote.changegroup(fetch, 'pull')
    else:
        cg = remote.changegroupsubset(fetch, heads, 'pull')
    return self.addchangegroup(cg)
959 959
def push(self, remote, force=False, revs=None):
    """Push local changesets to remote.

    force pushes even with unsynced remote changes or new remote heads;
    revs limits the push to the given changesets and their ancestors.
    Returns 1 on refusal/no-op, otherwise remote.addchangegroup()'s
    result.
    """
    # NOTE(review): remote lock held via this reference for the whole
    # push; released when 'lock' goes out of scope.
    lock = remote.lock()

    base = {}
    heads = remote.heads()
    inc = self.findincoming(remote, base, heads)
    # refuse to push if the remote has changes we have not pulled yet
    if not force and inc:
        self.ui.warn(_("abort: unsynced remote changes!\n"))
        self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
        return 1

    update = self.findoutgoing(remote, base)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return 1
    elif not force:
        # more heads than roots being pushed means we would create a
        # new head on the remote side
        if len(bases) < len(heads):
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return 1

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return remote.addchangegroup(cg)
992 992
def changegroupsubset(self, bases, heads, source):
    """This function generates a changegroup consisting of all the nodes
    that are descendents of any of the bases, and ancestors of any of
    the heads.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.

    bases/heads are lists of changelog nodes; source is the operation
    name passed to the preoutgoing/outgoing hooks.  Returns a
    util.chunkbuffer wrapping the group generator.
    """

    self.hook('preoutgoing', throw=True, source=source)

    # Set up some initial variables
    # Make it easy to refer to self.changelog
    cl = self.changelog
    # msng is short for missing - compute the list of changesets in this
    # changegroup.
    msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
    # Some bases may turn out to be superfluous, and some heads may be
    # too.  nodesbetween will return the minimal set of bases and heads
    # necessary to re-create the changegroup.

    # Known heads are the list of heads that it is assumed the recipient
    # of this changegroup will know about.
    knownheads = {}
    # We assume that all parents of bases are known heads.
    for n in bases:
        for p in cl.parents(n):
            if p != nullid:
                knownheads[p] = 1
    knownheads = knownheads.keys()
    if knownheads:
        # Now that we know what heads are known, we can compute which
        # changesets are known.  The recipient must know about all
        # changesets required to reach the known heads from the null
        # changeset.
        has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
        junk = None
        # Transform the list into an ersatz set.
        has_cl_set = dict.fromkeys(has_cl_set)
    else:
        # If there were no known heads, the recipient cannot be assumed to
        # know about any changesets.
        has_cl_set = {}

    # Make it easy to refer to self.manifest
    mnfst = self.manifest
    # We don't know which manifests are missing yet
    msng_mnfst_set = {}
    # Nor do we know which filenodes are missing.
    msng_filenode_set = {}

    junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
    junk = None

    # A changeset always belongs to itself, so the changenode lookup
    # function for a changenode is identity.
    def identity(x):
        return x

    # A function generating function.  Sets up an environment for the
    # inner function.
    def cmp_by_rev_func(revlog):
        # Compare two nodes by their revision number in the environment's
        # revision history.  Since the revision number both represents the
        # most efficient order to read the nodes in, and represents a
        # topological sorting of the nodes, this function is often useful.
        def cmp_by_rev(a, b):
            return cmp(revlog.rev(a), revlog.rev(b))
        return cmp_by_rev

    # If we determine that a particular file or manifest node must be a
    # node that the recipient of the changegroup will already have, we can
    # also assume the recipient will have all the parents.  This function
    # prunes them from the set of missing nodes.
    def prune_parents(revlog, hasset, msngset):
        haslst = hasset.keys()
        haslst.sort(cmp_by_rev_func(revlog))
        for node in haslst:
            parentlst = [p for p in revlog.parents(node) if p != nullid]
            while parentlst:
                n = parentlst.pop()
                if n not in hasset:
                    hasset[n] = 1
                    p = [p for p in revlog.parents(n) if p != nullid]
                    parentlst.extend(p)
        for n in hasset:
            msngset.pop(n, None)

    # This is a function generating function used to set up an environment
    # for the inner function to execute in.
    def manifest_and_file_collector(changedfileset):
        # This is an information gathering function that gathers
        # information from each changeset node that goes out as part of
        # the changegroup.  The information gathered is a list of which
        # manifest nodes are potentially required (the recipient may
        # already have them) and total list of all files which were
        # changed in any changeset in the changegroup.
        #
        # We also remember the first changenode we saw any manifest
        # referenced by so we can later determine which changenode 'owns'
        # the manifest.
        def collect_manifests_and_files(clnode):
            c = cl.read(clnode)
            for f in c[3]:
                # This is to make sure we only have one instance of each
                # filename string for each filename.
                changedfileset.setdefault(f, f)
            msng_mnfst_set.setdefault(c[0], clnode)
        return collect_manifests_and_files

    # Figure out which manifest nodes (of the ones we think might be part
    # of the changegroup) the recipient must know about and remove them
    # from the changegroup.
    def prune_manifests():
        has_mnfst_set = {}
        for n in msng_mnfst_set:
            # If a 'missing' manifest thinks it belongs to a changenode
            # the recipient is assumed to have, obviously the recipient
            # must have that manifest.
            linknode = cl.node(mnfst.linkrev(n))
            if linknode in has_cl_set:
                has_mnfst_set[n] = 1
        prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

    # Use the information collected in collect_manifests_and_files to say
    # which changenode any manifestnode belongs to.
    def lookup_manifest_link(mnfstnode):
        return msng_mnfst_set[mnfstnode]

    # A function generating function that sets up the initial environment
    # the inner function.
    def filenode_collector(changedfiles):
        next_rev = [0]
        # This gathers information from each manifestnode included in the
        # changegroup about which filenodes the manifest node references
        # so we can include those in the changegroup too.
        #
        # It also remembers which changenode each filenode belongs to.  It
        # does this by assuming a filenode belongs to the changenode that
        # the first manifest referencing it belongs to.
        def collect_msng_filenodes(mnfstnode):
            r = mnfst.rev(mnfstnode)
            if r == next_rev[0]:
                # If the last rev we looked at was the one just previous,
                # we only need to see a diff.
                delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                # For each line in the delta
                for dline in delta.splitlines():
                    # get the filename and filenode for that line
                    f, fnode = dline.split('\0')
                    fnode = bin(fnode[:40])
                    f = changedfiles.get(f, None)
                    # And if the file is in the list of files we care
                    # about.
                    if f is not None:
                        # Get the changenode this manifest belongs to
                        clnode = msng_mnfst_set[mnfstnode]
                        # Create the set of filenodes for the file if
                        # there isn't one already.
                        ndset = msng_filenode_set.setdefault(f, {})
                        # And set the filenode's changelog node to the
                        # manifest's if it hasn't been set already.
                        ndset.setdefault(fnode, clnode)
            else:
                # Otherwise we need a full manifest.
                m = mnfst.read(mnfstnode)
                # For every file in we care about.
                for f in changedfiles:
                    fnode = m.get(f, None)
                    # If it's in the manifest
                    if fnode is not None:
                        # See comments above.
                        clnode = msng_mnfst_set[mnfstnode]
                        ndset = msng_filenode_set.setdefault(f, {})
                        ndset.setdefault(fnode, clnode)
            # Remember the revision we hope to see next.
            next_rev[0] = r + 1
        return collect_msng_filenodes

    # We have a list of filenodes we think we need for a file, lets remove
    # all those we know the recipient must have.
    def prune_filenodes(f, filerevlog):
        msngset = msng_filenode_set[f]
        hasset = {}
        # If a 'missing' filenode thinks it belongs to a changenode we
        # assume the recipient must have, then the recipient must have
        # that filenode.
        for n in msngset:
            clnode = cl.node(filerevlog.linkrev(n))
            if clnode in has_cl_set:
                hasset[n] = 1
        prune_parents(filerevlog, hasset, msngset)

    # A function generator function that sets up the a context for the
    # inner function.
    def lookup_filenode_link_func(fname):
        msngset = msng_filenode_set[fname]
        # Lookup the changenode the filenode belongs to.
        def lookup_filenode_link(fnode):
            return msngset[fnode]
        return lookup_filenode_link

    # Now that we have all these utility functions to help out and
    # logically divide up the task, generate the group.
    def gengroup():
        # The set of changed files starts empty.
        changedfiles = {}
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        group = cl.group(msng_cl_lst, identity,
                         manifest_and_file_collector(changedfiles))
        for chnk in group:
            yield chnk

        # The list of manifests has been collected by the generator
        # calling our functions back.
        prune_manifests()
        msng_mnfst_lst = msng_mnfst_set.keys()
        # Sort the manifestnodes by revision number.
        msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                            filenode_collector(changedfiles))
        for chnk in group:
            yield chnk

        # These are no longer needed, dereference and toss the memory for
        # them.
        msng_mnfst_lst = None
        msng_mnfst_set.clear()

        changedfiles = changedfiles.keys()
        changedfiles.sort()
        # Go through all our files in order sorted by name.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            # Toss out the filenodes that the recipient isn't really
            # missing.
            if msng_filenode_set.has_key(fname):
                prune_filenodes(fname, filerevlog)
                msng_filenode_lst = msng_filenode_set[fname].keys()
            else:
                msng_filenode_lst = []
            # If any filenodes are left, generate the group for them,
            # otherwise don't bother.
            if len(msng_filenode_lst) > 0:
                yield struct.pack(">l", len(fname) + 4) + fname
                # Sort the filenodes by their revision #
                msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                # Create a group generator and only pass in a changenode
                # lookup function as we need to collect no information
                # from filenodes.
                group = filerevlog.group(msng_filenode_lst,
                                         lookup_filenode_link_func(fname))
                for chnk in group:
                    yield chnk
            if msng_filenode_set.has_key(fname):
                # Don't need this anymore, toss it to free memory.
                del msng_filenode_set[fname]
        # Signal that no more groups are left.
        yield struct.pack(">l", 0)

        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

    return util.chunkbuffer(gengroup())
1262 1262
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    basenodes are the roots of the outgoing set; source is the operation
    name passed to the preoutgoing/outgoing hooks.  Returns a
    util.chunkbuffer wrapping the group generator.
    """

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    nodes = cl.nodesbetween(basenodes, None)[0]
    # set of changelog revision numbers going out, for linkrev filtering
    revset = dict.fromkeys([cl.rev(n) for n in nodes])

    # a changeset 'owns' itself, so changenode lookup is identity
    def identity(x):
        return x

    # yield the nodes of a revlog whose linked changeset is outgoing
    def gennodelst(revlog):
        for r in xrange(0, revlog.count()):
            n = revlog.node(r)
            if revlog.linkrev(n) in revset:
                yield n

    # collect the names of all files touched by outgoing changesets
    def changed_file_collector(changedfileset):
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    # map a manifest/file node back to its owning changelog node
    def lookuprevlink_func(revlog):
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            if nodeiter:
                # filename header chunk precedes each file's group
                yield struct.pack(">l", len(fname) + 4) + fname
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        # zero-length chunk marks the end of the changegroup
        yield struct.pack(">l", 0)
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1326 1326
def addchangegroup(self, source):
    """Read a changegroup from source (a file-like object) and add it
    to this repository inside a transaction, firing the
    prechangegroup/pretxnchangegroup/changegroup/incoming hooks.
    """

    # read one length-prefixed chunk; "" signals end of a group
    def getchunk():
        d = source.read(4)
        if not d:
            return ""
        l = struct.unpack(">l", d)[0]
        # the length includes its own 4 bytes, so <= 4 means empty
        if l <= 4:
            return ""
        d = source.read(l - 4)
        if len(d) < l - 4:
            raise repo.RepoError(_("premature EOF reading chunk"
                                   " (got %d bytes, expected %d)")
                                 % (len(d), l - 4))
        return d

    # iterate chunks until the group's terminating empty chunk
    def getgroup():
        while 1:
            c = getchunk()
            if not c:
                break
            yield c

    def csmap(x):
        self.ui.debug(_("add changeset %s\n") % short(x))
        return self.changelog.count()

    def revmap(x):
        return self.changelog.rev(x)

    if not source:
        return

    self.hook('prechangegroup', throw=True)

    changesets = files = revisions = 0

    tr = self.transaction()

    oldheads = len(self.changelog.heads())

    # pull off the changeset group
    self.ui.status(_("adding changesets\n"))
    co = self.changelog.tip()
    cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
    cnr, cor = map(self.changelog.rev, (cn, co))
    if cn == nullid:
        cnr = cor
    changesets = cnr - cor

    # pull off the manifest group
    self.ui.status(_("adding manifests\n"))
    # NOTE(review): mm and mo appear unused after assignment — presumably
    # kept for symmetry/debugging; confirm before removing.
    mm = self.manifest.tip()
    mo = self.manifest.addgroup(getgroup(), revmap, tr)

    # process the files
    self.ui.status(_("adding file changes\n"))
    while 1:
        f = getchunk()
        if not f:
            break
        self.ui.debug(_("adding %s revisions\n") % f)
        fl = self.file(f)
        o = fl.count()
        n = fl.addgroup(getgroup(), revmap, tr)
        revisions += fl.count() - o
        files += 1

    newheads = len(self.changelog.heads())
    heads = ""
    if oldheads and newheads > oldheads:
        heads = _(" (+%d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, heads))

    self.hook('pretxnchangegroup', throw=True,
              node=hex(self.changelog.node(cor+1)))

    tr.close()

    if changesets > 0:
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

        for i in range(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)))
1414 1414
def update(self, node, allow=False, force=False, choose=None,
           moddirstate=True, forcemerge=False, wlock=None):
    """Update the working directory to changeset 'node'.

    allow permits branch merges; force discards local changes; choose
    is an optional per-file filter; moddirstate=False leaves the
    dirstate untouched; forcemerge skips the dirty-working-dir check;
    wlock is an already-held working-dir lock, if any.
    Returns 1 on refusal, otherwise True if any file merge failed.
    """
    pl = self.dirstate.parents()
    if not force and pl[1] != nullid:
        self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
        return 1

    err = False

    p1, p2 = pl[0], node
    pa = self.changelog.ancestor(p1, p2)
    m1n = self.changelog.read(p1)[0]
    m2n = self.changelog.read(p2)[0]
    man = self.manifest.ancestor(m1n, m2n)
    m1 = self.manifest.read(m1n)
    mf1 = self.manifest.readflags(m1n)
    m2 = self.manifest.read(m2n).copy()
    mf2 = self.manifest.readflags(m2n)
    ma = self.manifest.read(man)
    mfa = self.manifest.readflags(man)

    modified, added, removed, deleted, unknown = self.changes()

    # is this a jump, or a merge?  i.e. is there a linear path
    # from p1 to p2?
    linear_path = (pa == p1 or pa == p2)

    if allow and linear_path:
        raise util.Abort(_("there is nothing to merge, "
                           "just use 'hg update'"))
    if allow and not forcemerge:
        if modified or added or removed:
            raise util.Abort(_("outstanding uncommited changes"))
    if not forcemerge and not force:
        for f in unknown:
            if f in m2:
                t1 = self.wread(f)
                t2 = self.file(f).read(m2[f])
                if cmp(t1, t2) != 0:
                    raise util.Abort(_("'%s' already exists in the working"
                                       " dir and differs from remote") % f)

    # resolve the manifest to determine which files
    # we care about merging
    self.ui.note(_("resolving manifests\n"))
    self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                  (force, allow, moddirstate, linear_path))
    self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                  (short(man), short(m1n), short(m2n)))

    merge = {}    # files needing a 3-way merge: f -> (my, other, mode)
    get = {}      # files to fetch from the target rev: f -> node
    remove = []   # files to delete from the working dir

    # construct a working dir manifest
    mw = m1.copy()
    mfw = mf1.copy()
    umap = dict.fromkeys(unknown)

    for f in added + modified + unknown:
        mw[f] = ""
        mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

    if moddirstate and not wlock:
        wlock = self.wlock()

    for f in deleted + removed:
        if f in mw:
            del mw[f]

        # If we're jumping between revisions (as opposed to merging),
        # and if neither the working directory nor the target rev has
        # the file, then we need to remove it from the dirstate, to
        # prevent the dirstate from listing the file when it is no
        # longer in the manifest.
        if moddirstate and linear_path and f not in m2:
            self.dirstate.forget((f,))

    # Compare manifests
    for f, n in mw.iteritems():
        if choose and not choose(f):
            continue
        if f in m2:
            s = 0

            # is the wfile new since m1, and match m2?
            if f not in m1:
                t1 = self.wread(f)
                t2 = self.file(f).read(m2[f])
                if cmp(t1, t2) == 0:
                    n = m2[f]
                del t1, t2

            # are files different?
            if n != m2[f]:
                a = ma.get(f, nullid)
                # are both different from the ancestor?
                if n != a and m2[f] != a:
                    self.ui.debug(_(" %s versions differ, resolve\n") % f)
                    # merge executable bits
                    # "if we changed or they changed, change in merge"
                    a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                    mode = ((a^b) | (a^c)) ^ a
                    merge[f] = (m1.get(f, nullid), m2[f], mode)
                    s = 1
                # are we clobbering?
                # is remote's version newer?
                # or are we going back in time?
                elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                    self.ui.debug(_(" remote %s is newer, get\n") % f)
                    get[f] = m2[f]
                    s = 1
            elif f in umap:
                # this unknown file is the same as the checkout
                get[f] = m2[f]

            if not s and mfw[f] != mf2[f]:
                if force:
                    self.ui.debug(_(" updating permissions for %s\n") % f)
                    util.set_exec(self.wjoin(f), mf2[f])
                else:
                    a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                    mode = ((a^b) | (a^c)) ^ a
                    if mode != b:
                        self.ui.debug(_(" updating permissions for %s\n")
                                      % f)
                        util.set_exec(self.wjoin(f), mode)
            del m2[f]
        elif f in ma:
            if n != ma[f]:
                r = _("d")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_(" local changed %s which remote deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("d"):
                    remove.append(f)
            else:
                self.ui.debug(_("other deleted %s\n") % f)
                remove.append(f) # other deleted it
        else:
            # file is created on branch or in working directory
            if force and f not in umap:
                self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                remove.append(f)
            elif n == m1.get(f, nullid): # same as parent
                if p2 == pa: # going backwards?
                    self.ui.debug(_("remote deleted %s\n") % f)
                    remove.append(f)
                else:
                    self.ui.debug(_("local modified %s, keeping\n") % f)
            else:
                self.ui.debug(_("working dir created %s, keeping\n") % f)

    # files remaining in m2 exist only on the target side
    for f, n in m2.iteritems():
        if choose and not choose(f):
            continue
        if f[0] == "/":
            continue
        if f in ma and n != ma[f]:
            r = _("k")
            if not force and (linear_path or allow):
                r = self.ui.prompt(
                    (_("remote changed %s which local deleted\n") % f) +
                    _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
            if r == _("k"):
                get[f] = n
        elif f not in ma:
            self.ui.debug(_("remote created %s\n") % f)
            get[f] = n
        else:
            if force or p2 == pa: # going backwards?
                self.ui.debug(_("local deleted %s, recreating\n") % f)
                get[f] = n
            else:
                self.ui.debug(_("local deleted %s\n") % f)

    del mw, m1, m2, ma

    if force:
        for f in merge:
            get[f] = merge[f][1]
        merge = {}

    if linear_path or force:
        # we don't need to do any magic, just jump to the new rev
        branch_merge = False
        p1, p2 = p2, nullid
    else:
        if not allow:
            self.ui.status(_("this update spans a branch"
                             " affecting the following files:\n"))
            fl = merge.keys() + get.keys()
            fl.sort()
            for f in fl:
                cf = ""
                if f in merge:
                    cf = _(" (resolve)")
                self.ui.status(" %s%s\n" % (f, cf))
            self.ui.warn(_("aborting update spanning branches!\n"))
            self.ui.status(_("(use update -m to merge across branches"
                             " or -C to lose changes)\n"))
            return 1
        branch_merge = True

    # get the files we don't need to change
    files = get.keys()
    files.sort()
    for f in files:
        if f[0] == "/":
            continue
        self.ui.note(_("getting %s\n") % f)
        t = self.file(f).read(get[f])
        self.wwrite(f, t)
        util.set_exec(self.wjoin(f), mf2[f])
        if moddirstate:
            if branch_merge:
                self.dirstate.update([f], 'n', st_mtime=-1)
            else:
                self.dirstate.update([f], 'n')

    # merge the tricky bits
    files = merge.keys()
    files.sort()
    # hex parent nodes are exported to the merge helper's environment
    xp1 = hex(p1)
    xp2 = hex(p2)
    for f in files:
        self.ui.status(_("merging %s\n") % f)
        my, other, flag = merge[f]
        ret = self.merge3(f, my, other, xp1, xp2)
        if ret:
            err = True
        util.set_exec(self.wjoin(f), flag)
        if moddirstate:
            if branch_merge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                self.dirstate.update([f], 'm')
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                f_len = len(self.file(f).read(other))
                self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

    remove.sort()
    for f in remove:
        self.ui.note(_("removing %s\n") % f)
        util.audit_path(f)
        try:
            util.unlink(self.wjoin(f))
        except OSError, inst:
            # already gone is fine; anything else is worth a warning
            if inst.errno != errno.ENOENT:
                self.ui.warn(_("update failed to remove %s: %s!\n") %
                             (f, inst.strerror))
    if moddirstate:
        if branch_merge:
            self.dirstate.update(remove, 'r')
        else:
            self.dirstate.forget(remove)

    if moddirstate:
        self.dirstate.setparents(p1, p2)
    return err
1679 1681
def merge3(self, fn, my, other, p1, p2):
    """perform a 3-way merge in the working directory

    fn is the repo-relative file name; my/other are the two filelog
    nodes to merge; p1/p2 are the hex changelog nodes being merged,
    exported to the merge helper via HG_MY_NODE / HG_OTHER_NODE.
    Returns the helper's exit status (non-zero on failure).
    """

    # write the given filelog revision to a temp file and return its name
    def temp(prefix, node):
        pre = "%s~%s." % (os.path.basename(fn), prefix)
        (fd, name) = tempfile.mkstemp("", pre)
        f = os.fdopen(fd, "wb")
        self.wwrite(fn, fl.read(node), f)
        f.close()
        return name

    fl = self.file(fn)
    base = fl.ancestor(my, other)
    a = self.wjoin(fn)
    b = temp("base", base)
    c = temp("other", other)

    self.ui.note(_("resolving %s\n") % fn)
    self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
                  (fn, short(my), short(other), short(base)))

    # helper resolution order: $HGMERGE, then ui.merge, then "hgmerge"
    cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
           or "hgmerge")
    r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c),
                    environ={'HG_ROOT': self.root,
                             'HG_FILE': fn,
                             'HG_MY_NODE': p1,
                             'HG_OTHER_NODE': p2,
                             'HG_FILE_MY_NODE': hex(my),
                             'HG_FILE_OTHER_NODE': hex(other),
                             'HG_FILE_BASE_NODE': hex(base)})
    if r:
        self.ui.warn(_("merging %s failed!\n") % fn)

    os.unlink(b)
    os.unlink(c)
    return r
1710 1719
1711 1720 def verify(self):
1712 1721 filelinkrevs = {}
1713 1722 filenodes = {}
1714 1723 changesets = revisions = files = 0
1715 1724 errors = [0]
1716 1725 neededmanifests = {}
1717 1726
1718 1727 def err(msg):
1719 1728 self.ui.warn(msg + "\n")
1720 1729 errors[0] += 1
1721 1730
1722 1731 def checksize(obj, name):
1723 1732 d = obj.checksize()
1724 1733 if d[0]:
1725 1734 err(_("%s data length off by %d bytes") % (name, d[0]))
1726 1735 if d[1]:
1727 1736 err(_("%s index contains %d extra bytes") % (name, d[1]))
1728 1737
1729 1738 seen = {}
1730 1739 self.ui.status(_("checking changesets\n"))
1731 1740 checksize(self.changelog, "changelog")
1732 1741
1733 1742 for i in range(self.changelog.count()):
1734 1743 changesets += 1
1735 1744 n = self.changelog.node(i)
1736 1745 l = self.changelog.linkrev(n)
1737 1746 if l != i:
1738 1747 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1739 1748 if n in seen:
1740 1749 err(_("duplicate changeset at revision %d") % i)
1741 1750 seen[n] = 1
1742 1751
1743 1752 for p in self.changelog.parents(n):
1744 1753 if p not in self.changelog.nodemap:
1745 1754 err(_("changeset %s has unknown parent %s") %
1746 1755 (short(n), short(p)))
1747 1756 try:
1748 1757 changes = self.changelog.read(n)
1749 1758 except KeyboardInterrupt:
1750 1759 self.ui.warn(_("interrupted"))
1751 1760 raise
1752 1761 except Exception, inst:
1753 1762 err(_("unpacking changeset %s: %s") % (short(n), inst))
1754 1763 continue
1755 1764
1756 1765 neededmanifests[changes[0]] = n
1757 1766
1758 1767 for f in changes[3]:
1759 1768 filelinkrevs.setdefault(f, []).append(i)
1760 1769
1761 1770 seen = {}
1762 1771 self.ui.status(_("checking manifests\n"))
1763 1772 checksize(self.manifest, "manifest")
1764 1773
1765 1774 for i in range(self.manifest.count()):
1766 1775 n = self.manifest.node(i)
1767 1776 l = self.manifest.linkrev(n)
1768 1777
1769 1778 if l < 0 or l >= self.changelog.count():
1770 1779 err(_("bad manifest link (%d) at revision %d") % (l, i))
1771 1780
1772 1781 if n in neededmanifests:
1773 1782 del neededmanifests[n]
1774 1783
1775 1784 if n in seen:
1776 1785 err(_("duplicate manifest at revision %d") % i)
1777 1786
1778 1787 seen[n] = 1
1779 1788
1780 1789 for p in self.manifest.parents(n):
1781 1790 if p not in self.manifest.nodemap:
1782 1791 err(_("manifest %s has unknown parent %s") %
1783 1792 (short(n), short(p)))
1784 1793
1785 1794 try:
1786 1795 delta = mdiff.patchtext(self.manifest.delta(n))
1787 1796 except KeyboardInterrupt:
1788 1797 self.ui.warn(_("interrupted"))
1789 1798 raise
1790 1799 except Exception, inst:
1791 1800 err(_("unpacking manifest %s: %s") % (short(n), inst))
1792 1801 continue
1793 1802
1794 1803 try:
1795 1804 ff = [ l.split('\0') for l in delta.splitlines() ]
1796 1805 for f, fn in ff:
1797 1806 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1798 1807 except (ValueError, TypeError), inst:
1799 1808 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1800 1809
1801 1810 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1802 1811
1803 1812 for m, c in neededmanifests.items():
1804 1813 err(_("Changeset %s refers to unknown manifest %s") %
1805 1814 (short(m), short(c)))
1806 1815 del neededmanifests
1807 1816
1808 1817 for f in filenodes:
1809 1818 if f not in filelinkrevs:
1810 1819 err(_("file %s in manifest but not in changesets") % f)
1811 1820
1812 1821 for f in filelinkrevs:
1813 1822 if f not in filenodes:
1814 1823 err(_("file %s in changeset but not in manifest") % f)
1815 1824
1816 1825 self.ui.status(_("checking files\n"))
1817 1826 ff = filenodes.keys()
1818 1827 ff.sort()
1819 1828 for f in ff:
1820 1829 if f == "/dev/null":
1821 1830 continue
1822 1831 files += 1
1823 1832 if not f:
1824 1833 err(_("file without name in manifest %s") % short(n))
1825 1834 continue
1826 1835 fl = self.file(f)
1827 1836 checksize(fl, f)
1828 1837
1829 1838 nodes = {nullid: 1}
1830 1839 seen = {}
1831 1840 for i in range(fl.count()):
1832 1841 revisions += 1
1833 1842 n = fl.node(i)
1834 1843
1835 1844 if n in seen:
1836 1845 err(_("%s: duplicate revision %d") % (f, i))
1837 1846 if n not in filenodes[f]:
1838 1847 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1839 1848 else:
1840 1849 del filenodes[f][n]
1841 1850
1842 1851 flr = fl.linkrev(n)
1843 1852 if flr not in filelinkrevs.get(f, []):
1844 1853 err(_("%s:%s points to unexpected changeset %d")
1845 1854 % (f, short(n), flr))
1846 1855 else:
1847 1856 filelinkrevs[f].remove(flr)
1848 1857
1849 1858 # verify contents
1850 1859 try:
1851 1860 t = fl.read(n)
1852 1861 except KeyboardInterrupt:
1853 1862 self.ui.warn(_("interrupted"))
1854 1863 raise
1855 1864 except Exception, inst:
1856 1865 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1857 1866
1858 1867 # verify parents
1859 1868 (p1, p2) = fl.parents(n)
1860 1869 if p1 not in nodes:
1861 1870 err(_("file %s:%s unknown parent 1 %s") %
1862 1871 (f, short(n), short(p1)))
1863 1872 if p2 not in nodes:
1864 1873 err(_("file %s:%s unknown parent 2 %s") %
1865 1874 (f, short(n), short(p1)))
1866 1875 nodes[n] = 1
1867 1876
1868 1877 # cross-check
1869 1878 for node in filenodes[f]:
1870 1879 err(_("node %s in manifests not in %s") % (hex(node), f))
1871 1880
1872 1881 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1873 1882 (files, changesets, revisions))
1874 1883
1875 1884 if errors[0]:
1876 1885 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1877 1886 return 1
1878 1887
# factory kept free of repo references to avoid circular references,
# so destructors still run
def aftertrans(base):
    """return a callable that commits a finished transaction

    The returned function renames the journal files under `base` to
    their undo counterparts, making the transaction undoable.
    """
    def rename_journal():
        for name in ("journal", "journal.dirstate"):
            util.rename(os.path.join(base, name),
                        os.path.join(base, name.replace("journal", "undo")))
    return rename_journal
1887 1896
General Comments 0
You need to be logged in to leave comments. Login now