##// END OF EJS Templates
Always remove appendopener tmp files (fixes issue235)....
Thomas Arendsen Hein -
r2232:ef3c039e default
parent child Browse files
Show More
@@ -1,156 +1,162 b''
1 1 # appendfile.py - special classes to make repo updates atomic
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import *
9 9 demandload(globals(), "cStringIO changelog errno manifest os tempfile util")
10 10
11 11 # writes to metadata files are ordered. reads: changelog, manifest,
12 12 # normal files. writes: normal files, manifest, changelog.
13 13
14 14 # manifest contains pointers to offsets in normal files. changelog
15 15 # contains pointers to offsets in manifest. if reader reads old
16 16 # changelog while manifest or normal files are written, it has no
17 17 # pointers into new parts of those files that are maybe not consistent
18 18 # yet, so will not read them.
19 19
20 20 # localrepo.addchangegroup thinks it writes changelog first, then
21 21 # manifest, then normal files (this is order they are available, and
22 22 # needed for computing linkrev fields), but uses appendfile to hide
23 23 # updates from readers. data not written to manifest or changelog
24 24 # until all normal files updated. write manifest first, then
25 25 # changelog.
26 26
27 27 # with this write ordering, readers cannot see inconsistent view of
28 28 # repo during update.
29 29
class appendfile(object):
    '''implement enough of file protocol to append to revlog file.
    appended data is written to temp file. reads and seeks span real
    file and temp file. readers cannot see appended data until
    writedata called.'''

    def __init__(self, fp, tmpname):
        # fp: the real, already-existing file object being extended.
        # tmpname: name of an existing temp file to reuse, or a falsy
        # value to create a fresh one.
        if tmpname:
            self.tmpname = tmpname
            self.tmpfp = util.posixfile(self.tmpname, 'ab+')
        else:
            fd, self.tmpname = tempfile.mkstemp(prefix="hg-appendfile-")
            os.close(fd)
            self.tmpfp = util.posixfile(self.tmpname, 'ab+')
        self.realfp = fp
        # virtual offset: positions < realsize fall in the real file,
        # the rest in the temp file
        self.offset = fp.tell()
        # real file is not written by anyone else. cache its size so
        # seek and read can be fast.
        self.realsize = util.fstat(fp).st_size
        self.name = fp.name

    def end(self):
        '''return total virtual size: real file plus appended data.'''
        self.tmpfp.flush() # make sure the stat is correct
        return self.realsize + util.fstat(self.tmpfp).st_size

    def tell(self):
        '''return the current virtual offset.'''
        return self.offset

    def flush(self):
        self.tmpfp.flush()

    def close(self):
        self.realfp.close()
        self.tmpfp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and temp file.'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset

        # position whichever underlying file the offset falls into
        if self.offset < self.realsize:
            self.realfp.seek(self.offset)
        else:
            self.tmpfp.seek(self.offset - self.realsize)

    def read(self, count=-1):
        '''only trick here is reads that span real file and temp file.'''
        fp = cStringIO.StringIO()
        old_offset = self.offset
        if self.offset < self.realsize:
            # first part (or all) of the read comes from the real file
            s = self.realfp.read(count)
            fp.write(s)
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            # count < 0 (read-all) or bytes still wanted: continue in
            # the temp file
            if old_offset != self.offset:
                # we consumed some of the real file above, so the temp
                # file must be repositioned at the start of its data
                self.tmpfp.seek(self.offset - self.realsize)
            s = self.tmpfp.read(count)
            fp.write(s)
            self.offset += len(s)
        return fp.getvalue()

    def write(self, s):
        '''append to temp file.'''
        self.tmpfp.seek(0, 2)
        self.tmpfp.write(s)
        # all writes are appends, so offset must go to end of file.
        self.offset = self.realsize + self.tmpfp.tell()
103 103
104 104 class appendopener(object):
105 105 '''special opener for files that only read or append.'''
106 106
107 107 def __init__(self, opener):
108 108 self.realopener = opener
109 109 # key: file name, value: appendfile name
110 110 self.tmpnames = {}
111 111
112 112 def __call__(self, name, mode='r'):
113 113 '''open file.'''
114 114
115 115 assert mode in 'ra+'
116 116 try:
117 117 realfp = self.realopener(name, 'r')
118 118 except IOError, err:
119 119 if err.errno != errno.ENOENT: raise
120 120 realfp = self.realopener(name, 'w+')
121 121 tmpname = self.tmpnames.get(name)
122 122 fp = appendfile(realfp, tmpname)
123 123 if tmpname is None:
124 124 self.tmpnames[name] = fp.tmpname
125 125 return fp
126 126
127 127 def writedata(self):
128 128 '''copy data from temp files to real files.'''
129 129 # write .d file before .i file.
130 130 tmpnames = self.tmpnames.items()
131 131 tmpnames.sort()
132 132 for name, tmpname in tmpnames:
133 133 fp = open(tmpname, 'rb')
134 134 s = fp.read()
135 135 fp.close()
136 136 os.unlink(tmpname)
137 del self.tmpnames[name]
137 138 fp = self.realopener(name, 'a')
138 139 fp.write(s)
139 140 fp.close()
140 141
142 def cleanup(self):
143 '''delete temp files (this discards unwritten data!)'''
144 for tmpname in self.tmpnames.values():
145 os.unlink(tmpname)
146
141 147 # files for changelog and manifest are in different appendopeners, so
142 148 # not mixed up together.
143 149
class appendchangelog(changelog.changelog, appendopener):
    '''changelog whose revlog I/O goes through an appendopener, so
    appended entries stay invisible until writedata is called.'''
    def __init__(self, opener, version):
        appendopener.__init__(self, opener)
        # pass self as the opener: the revlog reads/writes via __call__
        changelog.changelog.__init__(self, self, version)
    def checkinlinesize(self, fp, tr):
        # intentionally a no-op: the revlog's inline-size migration is
        # disabled while appending through temp files
        return
150 156
class appendmanifest(manifest.manifest, appendopener):
    '''manifest whose revlog I/O goes through an appendopener, so
    appended entries stay invisible until writedata is called.'''
    def __init__(self, opener, version):
        appendopener.__init__(self, opener)
        # pass self as the opener: the revlog reads/writes via __call__
        manifest.manifest.__init__(self, self, version)
    def checkinlinesize(self, fp, tr):
        # intentionally a no-op: the revlog's inline-size migration is
        # disabled while appending through temp files
        return
@@ -1,2078 +1,2089 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "revlog traceback")
16 16
17 17 class localrepository(object):
    def __del__(self):
        # drop our reference to any transaction handle when the
        # repository object is torn down
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        '''open (or with create=1, create) the repository at path.
        without a path, search upward from the current directory for
        a .hg directory.'''
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding .hg
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener for .hg metadata, wopener for working directory files
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is fine
            pass

        # revlog format/flags come from the [revlog] config section
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-filled caches; see tags()/nodetags()/wread()/wwrite()
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
80 80
    def hook(self, name, throw=False, **args):
        '''run every configured hook whose name matches. entries of the
        form "python:mod.func" are called in-process, anything else is
        run as a shell command. returns a truthy value if any hook
        failed; with throw=True the first failure raises instead.'''

        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk attribute path below the top-level module
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                if self.ui.traceback:
                    traceback.print_exc()
                # exception counts as hook failure
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            '''run an external hook command with HG_* args in its env.'''
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
                       [(k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # "name" and "name.suffix" entries both match hook type "name"
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
159 159
160 160 def tags(self):
161 161 '''return a mapping of tag to node'''
162 162 if not self.tagscache:
163 163 self.tagscache = {}
164 164
165 165 def parsetag(line, context):
166 166 if not line:
167 167 return
168 168 s = l.split(" ", 1)
169 169 if len(s) != 2:
170 170 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
171 171 return
172 172 node, key = s
173 173 try:
174 174 bin_n = bin(node)
175 175 except TypeError:
176 176 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
177 177 return
178 178 if bin_n not in self.changelog.nodemap:
179 179 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
180 180 return
181 181 self.tagscache[key.strip()] = bin_n
182 182
183 183 # read each head of the tags file, ending with the tip
184 184 # and add each tag found to the map, with "newer" ones
185 185 # taking precedence
186 186 fl = self.file(".hgtags")
187 187 h = fl.heads()
188 188 h.reverse()
189 189 for r in h:
190 190 count = 0
191 191 for l in fl.read(r).splitlines():
192 192 count += 1
193 193 parsetag(l, ".hgtags:%d" % count)
194 194
195 195 try:
196 196 f = self.opener("localtags")
197 197 count = 0
198 198 for l in f:
199 199 count += 1
200 200 parsetag(l, "localtags:%d" % count)
201 201 except IOError:
202 202 pass
203 203
204 204 self.tagscache['tip'] = self.changelog.tip()
205 205
206 206 return self.tagscache
207 207
208 208 def tagslist(self):
209 209 '''return a list of tags ordered by revision'''
210 210 l = []
211 211 for t, n in self.tags().items():
212 212 try:
213 213 r = self.changelog.rev(n)
214 214 except:
215 215 r = -2 # sort to the beginning of the list if unknown
216 216 l.append((r, t, n))
217 217 l.sort()
218 218 return [(t, n) for r, t, n in l]
219 219
220 220 def nodetags(self, node):
221 221 '''return the tags associated with a node'''
222 222 if not self.nodetagscache:
223 223 self.nodetagscache = {}
224 224 for t, n in self.tags().items():
225 225 self.nodetagscache.setdefault(n, []).append(t)
226 226 return self.nodetagscache.get(node, [])
227 227
228 228 def lookup(self, key):
229 229 try:
230 230 return self.tags()[key]
231 231 except KeyError:
232 232 try:
233 233 return self.changelog.lookup(key)
234 234 except:
235 235 raise repo.RepoError(_("unknown revision '%s'") % key)
236 236
237 237 def dev(self):
238 238 return os.stat(self.path).st_dev
239 239
    def local(self):
        '''whether this repository is local (always True for this class).'''
        return True
242 242
    def join(self, f):
        '''return the path of f inside the .hg metadata directory.'''
        return os.path.join(self.path, f)
245 245
    def wjoin(self, f):
        '''return the path of f inside the working directory.'''
        return os.path.join(self.root, f)
248 248
249 249 def file(self, f):
250 250 if f[0] == '/':
251 251 f = f[1:]
252 252 return filelog.filelog(self.opener, f, self.revlogversion)
253 253
    def getcwd(self):
        '''return the current working directory as seen by the dirstate.'''
        return self.dirstate.getcwd()
256 256
    def wfile(self, f, mode='r'):
        '''open working directory file f in the given mode.'''
        return self.wopener(f, mode)
259 259
260 260 def wread(self, filename):
261 261 if self.encodepats == None:
262 262 l = []
263 263 for pat, cmd in self.ui.configitems("encode"):
264 264 mf = util.matcher(self.root, "", [pat], [], [])[1]
265 265 l.append((mf, cmd))
266 266 self.encodepats = l
267 267
268 268 data = self.wopener(filename, 'r').read()
269 269
270 270 for mf, cmd in self.encodepats:
271 271 if mf(filename):
272 272 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
273 273 data = util.filter(data, cmd)
274 274 break
275 275
276 276 return data
277 277
278 278 def wwrite(self, filename, data, fd=None):
279 279 if self.decodepats == None:
280 280 l = []
281 281 for pat, cmd in self.ui.configitems("decode"):
282 282 mf = util.matcher(self.root, "", [pat], [], [])[1]
283 283 l.append((mf, cmd))
284 284 self.decodepats = l
285 285
286 286 for mf, cmd in self.decodepats:
287 287 if mf(filename):
288 288 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
289 289 data = util.filter(data, cmd)
290 290 break
291 291
292 292 if fd:
293 293 return fd.write(data)
294 294 return self.wopener(filename, 'w').write(data)
295 295
    def transaction(self):
        '''return a transaction object; if one is already running,
        return a nested handle on it.'''
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for undo
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. brand-new repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
313 313
    def recover(self):
        '''roll back an interrupted transaction if a journal exists.
        returns True when something was rolled back, False otherwise.'''
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            # in-memory store state is stale after the rollback
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
324 324
    def undo(self, wlock=None):
        '''roll back the last transaction, restoring the dirstate that
        was saved alongside it.'''
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # both the store and the dirstate changed on disk
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no undo information available\n"))
337 337
    def wreload(self):
        '''reread the dirstate from disk.'''
        self.dirstate.read()
340 340
    def reload(self):
        '''reread store state from disk and invalidate the tag caches.'''
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
346 346
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        '''acquire the named lock file. if wait is false and the lock
        is held, LockHeld propagates; otherwise retry with a timeout.
        acquirefn (if given) runs after the lock is taken.'''
        try:
            # first attempt: no timeout
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
363 363
    def lock(self, wait=1):
        '''acquire the store lock; store state is reread on acquisition.'''
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
367 367
    def wlock(self, wait=1):
        '''acquire the working directory lock; the dirstate is written
        on release and reread on acquisition.'''
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
372 372
373 373 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
374 374 "determine whether a new filenode is needed"
375 375 fp1 = manifest1.get(filename, nullid)
376 376 fp2 = manifest2.get(filename, nullid)
377 377
378 378 if fp2 != nullid:
379 379 # is one parent an ancestor of the other?
380 380 fpa = filelog.ancestor(fp1, fp2)
381 381 if fpa == fp1:
382 382 fp1, fp2 = fp2, nullid
383 383 elif fpa == fp2:
384 384 fp2 = nullid
385 385
386 386 # is the file unmodified from the parent? report existing entry
387 387 if fp2 == nullid and text == filelog.read(fp1):
388 388 return (fp1, None, None)
389 389
390 390 return (None, fp1, fp2)
391 391
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        '''commit the named files with the given metadata, defaulting
        the parents to the dirstate parents. the dirstate is only
        updated when committing on top of the first dirstate parent.'''
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # touch the dirstate only when the commit parent is also the
        # working directory parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the existing filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file missing from the working directory: treat as removed
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
447 447
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None):
        '''commit working directory changes as a new changeset and
        return its node, or None when there is nothing to commit.
        without files, commits everything reported changed by
        self.changes(); with files, only the named files.'''
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # a merge (p2 set) is always committed, even with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and its revision in file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from parent: reuse existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text:
            # no message given: build a template and invoke the editor
            edittext = [""]
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            edittext = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)
            if not edittext.rstrip():
                # NOTE(review): this aborts the commit without calling
                # tr.close(), leaving the transaction open — presumably
                # it is rolled back/journal-cleaned elsewhere; confirm.
                return None
            text = edittext

        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
567 567
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''yield (source, filename) pairs for matched files. with a
        node, walk that revision's manifest (source 'm', or 'b' for
        badmatch-accepted names missing from it); otherwise delegate
        to the dirstate walk.'''
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                # track which explicitly requested files were found
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                # requested files absent from the manifest
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
585 585
586 586 def changes(self, node1=None, node2=None, files=[], match=util.always,
587 587 wlock=None, show_ignored=None):
588 588 """return changes between two nodes or node and working directory
589 589
590 590 If node1 is None, use the first dirstate parent instead.
591 591 If node2 is None, compare node1 with working directory.
592 592 """
593 593
594 594 def fcmp(fn, mf):
595 595 t1 = self.wread(fn)
596 596 t2 = self.file(fn).read(mf.get(fn, nullid))
597 597 return cmp(t1, t2)
598 598
599 599 def mfmatches(node):
600 600 change = self.changelog.read(node)
601 601 mf = dict(self.manifest.read(change[0]))
602 602 for fn in mf.keys():
603 603 if not match(fn):
604 604 del mf[fn]
605 605 return mf
606 606
607 607 if node1:
608 608 # read the manifest from node1 before the manifest from node2,
609 609 # so that we'll hit the manifest cache if we're going through
610 610 # all the revisions in parent->child order.
611 611 mf1 = mfmatches(node1)
612 612
613 613 # are we comparing the working directory?
614 614 if not node2:
615 615 if not wlock:
616 616 try:
617 617 wlock = self.wlock(wait=0)
618 618 except lock.LockException:
619 619 wlock = None
620 620 lookup, modified, added, removed, deleted, unknown, ignored = (
621 621 self.dirstate.changes(files, match, show_ignored))
622 622
623 623 # are we comparing working dir against its parent?
624 624 if not node1:
625 625 if lookup:
626 626 # do a full compare of any files that might have changed
627 627 mf2 = mfmatches(self.dirstate.parents()[0])
628 628 for f in lookup:
629 629 if fcmp(f, mf2):
630 630 modified.append(f)
631 631 elif wlock is not None:
632 632 self.dirstate.update([f], "n")
633 633 else:
634 634 # we are comparing working dir against non-parent
635 635 # generate a pseudo-manifest for the working dir
636 636 mf2 = mfmatches(self.dirstate.parents()[0])
637 637 for f in lookup + modified + added:
638 638 mf2[f] = ""
639 639 for f in removed:
640 640 if f in mf2:
641 641 del mf2[f]
642 642 else:
643 643 # we are comparing two revisions
644 644 deleted, unknown, ignored = [], [], []
645 645 mf2 = mfmatches(node2)
646 646
647 647 if node1:
648 648 # flush lists from dirstate before comparing manifests
649 649 modified, added = [], []
650 650
651 651 for fn in mf2:
652 652 if mf1.has_key(fn):
653 653 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
654 654 modified.append(fn)
655 655 del mf1[fn]
656 656 else:
657 657 added.append(fn)
658 658
659 659 removed = mf1.keys()
660 660
661 661 # sort and return results:
662 662 for l in modified, added, removed, deleted, unknown, ignored:
663 663 l.sort()
664 664 if show_ignored is None:
665 665 return (modified, added, removed, deleted, unknown)
666 666 else:
667 667 return (modified, added, removed, deleted, unknown, ignored)
668 668
669 669 def add(self, list, wlock=None):
670 670 if not wlock:
671 671 wlock = self.wlock()
672 672 for f in list:
673 673 p = self.wjoin(f)
674 674 if not os.path.exists(p):
675 675 self.ui.warn(_("%s does not exist!\n") % f)
676 676 elif not os.path.isfile(p):
677 677 self.ui.warn(_("%s not added: only files supported currently\n")
678 678 % f)
679 679 elif self.dirstate.state(f) in 'an':
680 680 self.ui.warn(_("%s already tracked!\n") % f)
681 681 else:
682 682 self.dirstate.update([f], "a")
683 683
684 684 def forget(self, list, wlock=None):
685 685 if not wlock:
686 686 wlock = self.wlock()
687 687 for f in list:
688 688 if self.dirstate.state(f) not in 'ai':
689 689 self.ui.warn(_("%s not added!\n") % f)
690 690 else:
691 691 self.dirstate.forget([f])
692 692
    def remove(self, list, unlink=False, wlock=None):
        '''mark the given files removed in the dirstate; with
        unlink=True, delete them from the working directory first.'''
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # already-missing files are fine; other errors are real
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # refuse to mark a file removed while it still exists
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just cancel the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
713 713
714 714 def undelete(self, list, wlock=None):
715 715 p = self.dirstate.parents()[0]
716 716 mn = self.changelog.read(p)[0]
717 717 mf = self.manifest.readflags(mn)
718 718 m = self.manifest.read(mn)
719 719 if not wlock:
720 720 wlock = self.wlock()
721 721 for f in list:
722 722 if self.dirstate.state(f) not in "r":
723 723 self.ui.warn("%s not removed!\n" % f)
724 724 else:
725 725 t = self.file(f).read(m[f])
726 726 self.wwrite(f, t)
727 727 util.set_exec(self.wjoin(f), mf[f])
728 728 self.dirstate.update([f], "n")
729 729
730 730 def copy(self, source, dest, wlock=None):
731 731 p = self.wjoin(dest)
732 732 if not os.path.exists(p):
733 733 self.ui.warn(_("%s does not exist!\n") % dest)
734 734 elif not os.path.isfile(p):
735 735 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
736 736 else:
737 737 if not wlock:
738 738 wlock = self.wlock()
739 739 if self.dirstate.state(dest) == '?':
740 740 self.dirstate.update([dest], "a")
741 741 self.dirstate.copy(source, dest)
742 742
743 743 def heads(self, start=None):
744 744 heads = self.changelog.heads(start)
745 745 # sort the output in rev descending order
746 746 heads = [(-self.changelog.rev(h), h) for h in heads]
747 747 heads.sort()
748 748 return [n for (r, n) in heads]
749 749
750 750 # branchlookup returns a dict giving a list of branches for
751 751 # each head. A branch is defined as the tag of a node or
752 752 # the branch of the node's parents. If a node has multiple
753 753 # branch tags, tags are eliminated if they are visible from other
754 754 # branch tags.
755 755 #
756 756 # So, for this graph: a->b->c->d->e
757 757 # \ /
758 758 # aa -----/
759 759 # a has tag 2.6.12
760 760 # d has tag 2.6.13
761 761 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
762 762 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
763 763 # from the list.
764 764 #
765 765 # It is possible that more than one head will have the same branch tag.
766 766 # callers need to check the result for multiple heads under the same
767 767 # branch tag if that is a problem for them (ie checkout of a specific
768 768 # branch).
769 769 #
770 770 # passing in a specific branch will limit the depth of the search
771 771 # through the parents. It won't limit the branches returned in the
772 772 # result though.
    def branchlookup(self, heads=None, branch=None):
        '''return a dict mapping each head to the list of branch tags
        reachable from it; see the comment block above this method for
        the full description of the algorithm and its caveats.'''
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        # branches maps a node to the set (as a dict) of tagged nodes
        # visible from it
        branches = {}
        merges = []
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a pending second-parent traversal, keeping the
                # tags found on the way to the merge
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # node collected so far, and from itself
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the branch we were asked about: stop
                        # walking past it
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the second parent with a copy of the tags
                    # found so far
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of nodes visible from node, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    # b is not shadowed by any other tag on this head
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
855 855
856 856 def branches(self, nodes):
857 857 if not nodes:
858 858 nodes = [self.changelog.tip()]
859 859 b = []
860 860 for n in nodes:
861 861 t = n
862 862 while n:
863 863 p = self.changelog.parents(n)
864 864 if p[1] != nullid or p[0] == nullid:
865 865 b.append((t, n, p[0], p[1]))
866 866 break
867 867 n = p[0]
868 868 return b
869 869
870 870 def between(self, pairs):
871 871 r = []
872 872
873 873 for top, bottom in pairs:
874 874 n, l, i = top, [], 0
875 875 f = 1
876 876
877 877 while n != bottom:
878 878 p = self.changelog.parents(n)[0]
879 879 if i == f:
880 880 l.append(n)
881 881 f = f * 2
882 882 n = p
883 883 i += 1
884 884
885 885 r.append(l)
886 886
887 887 return r
888 888
889 889 def findincoming(self, remote, base=None, heads=None, force=False):
890 890 m = self.changelog.nodemap
891 891 search = []
892 892 fetch = {}
893 893 seen = {}
894 894 seenbranch = {}
895 895 if base == None:
896 896 base = {}
897 897
898 898 if not heads:
899 899 heads = remote.heads()
900 900
901 901 if self.changelog.tip() == nullid:
902 902 if heads != [nullid]:
903 903 return [nullid]
904 904 return []
905 905
906 906 # assume we're closer to the tip than the root
907 907 # and start by examining the heads
908 908 self.ui.status(_("searching for changes\n"))
909 909
910 910 unknown = []
911 911 for h in heads:
912 912 if h not in m:
913 913 unknown.append(h)
914 914 else:
915 915 base[h] = 1
916 916
917 917 if not unknown:
918 918 return []
919 919
920 920 rep = {}
921 921 reqcnt = 0
922 922
923 923 # search through remote branches
924 924 # a 'branch' here is a linear segment of history, with four parts:
925 925 # head, root, first parent, second parent
926 926 # (a branch always has two parents (or none) by definition)
927 927 unknown = remote.branches(unknown)
928 928 while unknown:
929 929 r = []
930 930 while unknown:
931 931 n = unknown.pop(0)
932 932 if n[0] in seen:
933 933 continue
934 934
935 935 self.ui.debug(_("examining %s:%s\n")
936 936 % (short(n[0]), short(n[1])))
937 937 if n[0] == nullid:
938 938 break
939 939 if n in seenbranch:
940 940 self.ui.debug(_("branch already found\n"))
941 941 continue
942 942 if n[1] and n[1] in m: # do we know the base?
943 943 self.ui.debug(_("found incomplete branch %s:%s\n")
944 944 % (short(n[0]), short(n[1])))
945 945 search.append(n) # schedule branch range for scanning
946 946 seenbranch[n] = 1
947 947 else:
948 948 if n[1] not in seen and n[1] not in fetch:
949 949 if n[2] in m and n[3] in m:
950 950 self.ui.debug(_("found new changeset %s\n") %
951 951 short(n[1]))
952 952 fetch[n[1]] = 1 # earliest unknown
953 953 base[n[2]] = 1 # latest known
954 954 continue
955 955
956 956 for a in n[2:4]:
957 957 if a not in rep:
958 958 r.append(a)
959 959 rep[a] = 1
960 960
961 961 seen[n[0]] = 1
962 962
963 963 if r:
964 964 reqcnt += 1
965 965 self.ui.debug(_("request %d: %s\n") %
966 966 (reqcnt, " ".join(map(short, r))))
967 967 for p in range(0, len(r), 10):
968 968 for b in remote.branches(r[p:p+10]):
969 969 self.ui.debug(_("received %s:%s\n") %
970 970 (short(b[0]), short(b[1])))
971 971 if b[0] in m:
972 972 self.ui.debug(_("found base node %s\n")
973 973 % short(b[0]))
974 974 base[b[0]] = 1
975 975 elif b[0] not in seen:
976 976 unknown.append(b)
977 977
978 978 # do binary search on the branches we found
979 979 while search:
980 980 n = search.pop(0)
981 981 reqcnt += 1
982 982 l = remote.between([(n[0], n[1])])[0]
983 983 l.append(n[1])
984 984 p = n[0]
985 985 f = 1
986 986 for i in l:
987 987 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
988 988 if i in m:
989 989 if f <= 2:
990 990 self.ui.debug(_("found new branch changeset %s\n") %
991 991 short(p))
992 992 fetch[p] = 1
993 993 base[i] = 1
994 994 else:
995 995 self.ui.debug(_("narrowed branch search to %s:%s\n")
996 996 % (short(p), short(i)))
997 997 search.append((p, i))
998 998 break
999 999 p, f = i, f * 2
1000 1000
1001 1001 # sanity check our fetch list
1002 1002 for f in fetch.keys():
1003 1003 if f in m:
1004 1004 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1005 1005
1006 1006 if base.keys() == [nullid]:
1007 1007 if force:
1008 1008 self.ui.warn(_("warning: repository is unrelated\n"))
1009 1009 else:
1010 1010 raise util.Abort(_("repository is unrelated"))
1011 1011
1012 1012 self.ui.note(_("found new changesets starting at ") +
1013 1013 " ".join([short(f) for f in fetch]) + "\n")
1014 1014
1015 1015 self.ui.debug(_("%d total queries\n") % reqcnt)
1016 1016
1017 1017 return fetch.keys()
1018 1018
1019 1019 def findoutgoing(self, remote, base=None, heads=None, force=False):
1020 1020 """Return list of nodes that are roots of subsets not in remote
1021 1021
1022 1022 If base dict is specified, assume that these nodes and their parents
1023 1023 exist on the remote side.
1024 1024 If a list of heads is specified, return only nodes which are heads
1025 1025 or ancestors of these heads, and return a second element which
1026 1026 contains all remote heads which get new children.
1027 1027 """
1028 1028 if base == None:
1029 1029 base = {}
1030 1030 self.findincoming(remote, base, heads, force=force)
1031 1031
1032 1032 self.ui.debug(_("common changesets up to ")
1033 1033 + " ".join(map(short, base.keys())) + "\n")
1034 1034
1035 1035 remain = dict.fromkeys(self.changelog.nodemap)
1036 1036
1037 1037 # prune everything remote has from the tree
1038 1038 del remain[nullid]
1039 1039 remove = base.keys()
1040 1040 while remove:
1041 1041 n = remove.pop(0)
1042 1042 if n in remain:
1043 1043 del remain[n]
1044 1044 for p in self.changelog.parents(n):
1045 1045 remove.append(p)
1046 1046
1047 1047 # find every node whose parents have been pruned
1048 1048 subset = []
1049 1049 # find every remote head that will get new children
1050 1050 updated_heads = {}
1051 1051 for n in remain:
1052 1052 p1, p2 = self.changelog.parents(n)
1053 1053 if p1 not in remain and p2 not in remain:
1054 1054 subset.append(n)
1055 1055 if heads:
1056 1056 if p1 in heads:
1057 1057 updated_heads[p1] = True
1058 1058 if p2 in heads:
1059 1059 updated_heads[p2] = True
1060 1060
1061 1061 # this is the set of all roots we have to push
1062 1062 if heads:
1063 1063 return subset, updated_heads.keys()
1064 1064 else:
1065 1065 return subset
1066 1066
1067 1067 def pull(self, remote, heads=None, force=False):
1068 1068 l = self.lock()
1069 1069
1070 1070 fetch = self.findincoming(remote, force=force)
1071 1071 if fetch == [nullid]:
1072 1072 self.ui.status(_("requesting all changes\n"))
1073 1073
1074 1074 if not fetch:
1075 1075 self.ui.status(_("no changes found\n"))
1076 1076 return 0
1077 1077
1078 1078 if heads is None:
1079 1079 cg = remote.changegroup(fetch, 'pull')
1080 1080 else:
1081 1081 cg = remote.changegroupsubset(fetch, heads, 'pull')
1082 1082 return self.addchangegroup(cg, 'pull')
1083 1083
1084 1084 def push(self, remote, force=False, revs=None):
1085 1085 lock = remote.lock()
1086 1086
1087 1087 base = {}
1088 1088 remote_heads = remote.heads()
1089 1089 inc = self.findincoming(remote, base, remote_heads, force=force)
1090 1090 if not force and inc:
1091 1091 self.ui.warn(_("abort: unsynced remote changes!\n"))
1092 1092 self.ui.status(_("(did you forget to sync?"
1093 1093 " use push -f to force)\n"))
1094 1094 return 1
1095 1095
1096 1096 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1097 1097 if revs is not None:
1098 1098 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1099 1099 else:
1100 1100 bases, heads = update, self.changelog.heads()
1101 1101
1102 1102 if not bases:
1103 1103 self.ui.status(_("no changes found\n"))
1104 1104 return 1
1105 1105 elif not force:
1106 1106 # FIXME we don't properly detect creation of new heads
1107 1107 # in the push -r case, assume the user knows what he's doing
1108 1108 if not revs and len(remote_heads) < len(heads) \
1109 1109 and remote_heads != [nullid]:
1110 1110 self.ui.warn(_("abort: push creates new remote branches!\n"))
1111 1111 self.ui.status(_("(did you forget to merge?"
1112 1112 " use push -f to force)\n"))
1113 1113 return 1
1114 1114
1115 1115 if revs is None:
1116 1116 cg = self.changegroup(update, 'push')
1117 1117 else:
1118 1118 cg = self.changegroupsubset(update, revs, 'push')
1119 1119 return remote.addchangegroup(cg, 'push')
1120 1120
1121 1121 def changegroupsubset(self, bases, heads, source):
1122 1122 """This function generates a changegroup consisting of all the nodes
1123 1123 that are descendents of any of the bases, and ancestors of any of
1124 1124 the heads.
1125 1125
1126 1126 It is fairly complex as determining which filenodes and which
1127 1127 manifest nodes need to be included for the changeset to be complete
1128 1128 is non-trivial.
1129 1129
1130 1130 Another wrinkle is doing the reverse, figuring out which changeset in
1131 1131 the changegroup a particular filenode or manifestnode belongs to."""
1132 1132
1133 1133 self.hook('preoutgoing', throw=True, source=source)
1134 1134
1135 1135 # Set up some initial variables
1136 1136 # Make it easy to refer to self.changelog
1137 1137 cl = self.changelog
1138 1138 # msng is short for missing - compute the list of changesets in this
1139 1139 # changegroup.
1140 1140 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1141 1141 # Some bases may turn out to be superfluous, and some heads may be
1142 1142 # too. nodesbetween will return the minimal set of bases and heads
1143 1143 # necessary to re-create the changegroup.
1144 1144
1145 1145 # Known heads are the list of heads that it is assumed the recipient
1146 1146 # of this changegroup will know about.
1147 1147 knownheads = {}
1148 1148 # We assume that all parents of bases are known heads.
1149 1149 for n in bases:
1150 1150 for p in cl.parents(n):
1151 1151 if p != nullid:
1152 1152 knownheads[p] = 1
1153 1153 knownheads = knownheads.keys()
1154 1154 if knownheads:
1155 1155 # Now that we know what heads are known, we can compute which
1156 1156 # changesets are known. The recipient must know about all
1157 1157 # changesets required to reach the known heads from the null
1158 1158 # changeset.
1159 1159 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1160 1160 junk = None
1161 1161 # Transform the list into an ersatz set.
1162 1162 has_cl_set = dict.fromkeys(has_cl_set)
1163 1163 else:
1164 1164 # If there were no known heads, the recipient cannot be assumed to
1165 1165 # know about any changesets.
1166 1166 has_cl_set = {}
1167 1167
1168 1168 # Make it easy to refer to self.manifest
1169 1169 mnfst = self.manifest
1170 1170 # We don't know which manifests are missing yet
1171 1171 msng_mnfst_set = {}
1172 1172 # Nor do we know which filenodes are missing.
1173 1173 msng_filenode_set = {}
1174 1174
1175 1175 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1176 1176 junk = None
1177 1177
1178 1178 # A changeset always belongs to itself, so the changenode lookup
1179 1179 # function for a changenode is identity.
1180 1180 def identity(x):
1181 1181 return x
1182 1182
1183 1183 # A function generating function. Sets up an environment for the
1184 1184 # inner function.
1185 1185 def cmp_by_rev_func(revlog):
1186 1186 # Compare two nodes by their revision number in the environment's
1187 1187 # revision history. Since the revision number both represents the
1188 1188 # most efficient order to read the nodes in, and represents a
1189 1189 # topological sorting of the nodes, this function is often useful.
1190 1190 def cmp_by_rev(a, b):
1191 1191 return cmp(revlog.rev(a), revlog.rev(b))
1192 1192 return cmp_by_rev
1193 1193
1194 1194 # If we determine that a particular file or manifest node must be a
1195 1195 # node that the recipient of the changegroup will already have, we can
1196 1196 # also assume the recipient will have all the parents. This function
1197 1197 # prunes them from the set of missing nodes.
1198 1198 def prune_parents(revlog, hasset, msngset):
1199 1199 haslst = hasset.keys()
1200 1200 haslst.sort(cmp_by_rev_func(revlog))
1201 1201 for node in haslst:
1202 1202 parentlst = [p for p in revlog.parents(node) if p != nullid]
1203 1203 while parentlst:
1204 1204 n = parentlst.pop()
1205 1205 if n not in hasset:
1206 1206 hasset[n] = 1
1207 1207 p = [p for p in revlog.parents(n) if p != nullid]
1208 1208 parentlst.extend(p)
1209 1209 for n in hasset:
1210 1210 msngset.pop(n, None)
1211 1211
1212 1212 # This is a function generating function used to set up an environment
1213 1213 # for the inner function to execute in.
1214 1214 def manifest_and_file_collector(changedfileset):
1215 1215 # This is an information gathering function that gathers
1216 1216 # information from each changeset node that goes out as part of
1217 1217 # the changegroup. The information gathered is a list of which
1218 1218 # manifest nodes are potentially required (the recipient may
1219 1219 # already have them) and total list of all files which were
1220 1220 # changed in any changeset in the changegroup.
1221 1221 #
1222 1222 # We also remember the first changenode we saw any manifest
1223 1223 # referenced by so we can later determine which changenode 'owns'
1224 1224 # the manifest.
1225 1225 def collect_manifests_and_files(clnode):
1226 1226 c = cl.read(clnode)
1227 1227 for f in c[3]:
1228 1228 # This is to make sure we only have one instance of each
1229 1229 # filename string for each filename.
1230 1230 changedfileset.setdefault(f, f)
1231 1231 msng_mnfst_set.setdefault(c[0], clnode)
1232 1232 return collect_manifests_and_files
1233 1233
1234 1234 # Figure out which manifest nodes (of the ones we think might be part
1235 1235 # of the changegroup) the recipient must know about and remove them
1236 1236 # from the changegroup.
1237 1237 def prune_manifests():
1238 1238 has_mnfst_set = {}
1239 1239 for n in msng_mnfst_set:
1240 1240 # If a 'missing' manifest thinks it belongs to a changenode
1241 1241 # the recipient is assumed to have, obviously the recipient
1242 1242 # must have that manifest.
1243 1243 linknode = cl.node(mnfst.linkrev(n))
1244 1244 if linknode in has_cl_set:
1245 1245 has_mnfst_set[n] = 1
1246 1246 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1247 1247
1248 1248 # Use the information collected in collect_manifests_and_files to say
1249 1249 # which changenode any manifestnode belongs to.
1250 1250 def lookup_manifest_link(mnfstnode):
1251 1251 return msng_mnfst_set[mnfstnode]
1252 1252
1253 1253 # A function generating function that sets up the initial environment
1254 1254 # the inner function.
1255 1255 def filenode_collector(changedfiles):
1256 1256 next_rev = [0]
1257 1257 # This gathers information from each manifestnode included in the
1258 1258 # changegroup about which filenodes the manifest node references
1259 1259 # so we can include those in the changegroup too.
1260 1260 #
1261 1261 # It also remembers which changenode each filenode belongs to. It
1262 1262 # does this by assuming the a filenode belongs to the changenode
1263 1263 # the first manifest that references it belongs to.
1264 1264 def collect_msng_filenodes(mnfstnode):
1265 1265 r = mnfst.rev(mnfstnode)
1266 1266 if r == next_rev[0]:
1267 1267 # If the last rev we looked at was the one just previous,
1268 1268 # we only need to see a diff.
1269 1269 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1270 1270 # For each line in the delta
1271 1271 for dline in delta.splitlines():
1272 1272 # get the filename and filenode for that line
1273 1273 f, fnode = dline.split('\0')
1274 1274 fnode = bin(fnode[:40])
1275 1275 f = changedfiles.get(f, None)
1276 1276 # And if the file is in the list of files we care
1277 1277 # about.
1278 1278 if f is not None:
1279 1279 # Get the changenode this manifest belongs to
1280 1280 clnode = msng_mnfst_set[mnfstnode]
1281 1281 # Create the set of filenodes for the file if
1282 1282 # there isn't one already.
1283 1283 ndset = msng_filenode_set.setdefault(f, {})
1284 1284 # And set the filenode's changelog node to the
1285 1285 # manifest's if it hasn't been set already.
1286 1286 ndset.setdefault(fnode, clnode)
1287 1287 else:
1288 1288 # Otherwise we need a full manifest.
1289 1289 m = mnfst.read(mnfstnode)
1290 1290 # For every file in we care about.
1291 1291 for f in changedfiles:
1292 1292 fnode = m.get(f, None)
1293 1293 # If it's in the manifest
1294 1294 if fnode is not None:
1295 1295 # See comments above.
1296 1296 clnode = msng_mnfst_set[mnfstnode]
1297 1297 ndset = msng_filenode_set.setdefault(f, {})
1298 1298 ndset.setdefault(fnode, clnode)
1299 1299 # Remember the revision we hope to see next.
1300 1300 next_rev[0] = r + 1
1301 1301 return collect_msng_filenodes
1302 1302
1303 1303 # We have a list of filenodes we think we need for a file, lets remove
1304 1304 # all those we now the recipient must have.
1305 1305 def prune_filenodes(f, filerevlog):
1306 1306 msngset = msng_filenode_set[f]
1307 1307 hasset = {}
1308 1308 # If a 'missing' filenode thinks it belongs to a changenode we
1309 1309 # assume the recipient must have, then the recipient must have
1310 1310 # that filenode.
1311 1311 for n in msngset:
1312 1312 clnode = cl.node(filerevlog.linkrev(n))
1313 1313 if clnode in has_cl_set:
1314 1314 hasset[n] = 1
1315 1315 prune_parents(filerevlog, hasset, msngset)
1316 1316
1317 1317 # A function generator function that sets up the a context for the
1318 1318 # inner function.
1319 1319 def lookup_filenode_link_func(fname):
1320 1320 msngset = msng_filenode_set[fname]
1321 1321 # Lookup the changenode the filenode belongs to.
1322 1322 def lookup_filenode_link(fnode):
1323 1323 return msngset[fnode]
1324 1324 return lookup_filenode_link
1325 1325
1326 1326 # Now that we have all theses utility functions to help out and
1327 1327 # logically divide up the task, generate the group.
1328 1328 def gengroup():
1329 1329 # The set of changed files starts empty.
1330 1330 changedfiles = {}
1331 1331 # Create a changenode group generator that will call our functions
1332 1332 # back to lookup the owning changenode and collect information.
1333 1333 group = cl.group(msng_cl_lst, identity,
1334 1334 manifest_and_file_collector(changedfiles))
1335 1335 for chnk in group:
1336 1336 yield chnk
1337 1337
1338 1338 # The list of manifests has been collected by the generator
1339 1339 # calling our functions back.
1340 1340 prune_manifests()
1341 1341 msng_mnfst_lst = msng_mnfst_set.keys()
1342 1342 # Sort the manifestnodes by revision number.
1343 1343 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1344 1344 # Create a generator for the manifestnodes that calls our lookup
1345 1345 # and data collection functions back.
1346 1346 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1347 1347 filenode_collector(changedfiles))
1348 1348 for chnk in group:
1349 1349 yield chnk
1350 1350
1351 1351 # These are no longer needed, dereference and toss the memory for
1352 1352 # them.
1353 1353 msng_mnfst_lst = None
1354 1354 msng_mnfst_set.clear()
1355 1355
1356 1356 changedfiles = changedfiles.keys()
1357 1357 changedfiles.sort()
1358 1358 # Go through all our files in order sorted by name.
1359 1359 for fname in changedfiles:
1360 1360 filerevlog = self.file(fname)
1361 1361 # Toss out the filenodes that the recipient isn't really
1362 1362 # missing.
1363 1363 if msng_filenode_set.has_key(fname):
1364 1364 prune_filenodes(fname, filerevlog)
1365 1365 msng_filenode_lst = msng_filenode_set[fname].keys()
1366 1366 else:
1367 1367 msng_filenode_lst = []
1368 1368 # If any filenodes are left, generate the group for them,
1369 1369 # otherwise don't bother.
1370 1370 if len(msng_filenode_lst) > 0:
1371 1371 yield changegroup.genchunk(fname)
1372 1372 # Sort the filenodes by their revision #
1373 1373 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1374 1374 # Create a group generator and only pass in a changenode
1375 1375 # lookup function as we need to collect no information
1376 1376 # from filenodes.
1377 1377 group = filerevlog.group(msng_filenode_lst,
1378 1378 lookup_filenode_link_func(fname))
1379 1379 for chnk in group:
1380 1380 yield chnk
1381 1381 if msng_filenode_set.has_key(fname):
1382 1382 # Don't need this anymore, toss it to free memory.
1383 1383 del msng_filenode_set[fname]
1384 1384 # Signal that no more groups are left.
1385 1385 yield changegroup.closechunk()
1386 1386
1387 1387 if msng_cl_lst:
1388 1388 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1389 1389
1390 1390 return util.chunkbuffer(gengroup())
1391 1391
1392 1392 def changegroup(self, basenodes, source):
1393 1393 """Generate a changegroup of all nodes that we have that a recipient
1394 1394 doesn't.
1395 1395
1396 1396 This is much easier than the previous function as we can assume that
1397 1397 the recipient has any changenode we aren't sending them."""
1398 1398
1399 1399 self.hook('preoutgoing', throw=True, source=source)
1400 1400
1401 1401 cl = self.changelog
1402 1402 nodes = cl.nodesbetween(basenodes, None)[0]
1403 1403 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1404 1404
1405 1405 def identity(x):
1406 1406 return x
1407 1407
1408 1408 def gennodelst(revlog):
1409 1409 for r in xrange(0, revlog.count()):
1410 1410 n = revlog.node(r)
1411 1411 if revlog.linkrev(n) in revset:
1412 1412 yield n
1413 1413
1414 1414 def changed_file_collector(changedfileset):
1415 1415 def collect_changed_files(clnode):
1416 1416 c = cl.read(clnode)
1417 1417 for fname in c[3]:
1418 1418 changedfileset[fname] = 1
1419 1419 return collect_changed_files
1420 1420
1421 1421 def lookuprevlink_func(revlog):
1422 1422 def lookuprevlink(n):
1423 1423 return cl.node(revlog.linkrev(n))
1424 1424 return lookuprevlink
1425 1425
1426 1426 def gengroup():
1427 1427 # construct a list of all changed files
1428 1428 changedfiles = {}
1429 1429
1430 1430 for chnk in cl.group(nodes, identity,
1431 1431 changed_file_collector(changedfiles)):
1432 1432 yield chnk
1433 1433 changedfiles = changedfiles.keys()
1434 1434 changedfiles.sort()
1435 1435
1436 1436 mnfst = self.manifest
1437 1437 nodeiter = gennodelst(mnfst)
1438 1438 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1439 1439 yield chnk
1440 1440
1441 1441 for fname in changedfiles:
1442 1442 filerevlog = self.file(fname)
1443 1443 nodeiter = gennodelst(filerevlog)
1444 1444 nodeiter = list(nodeiter)
1445 1445 if nodeiter:
1446 1446 yield changegroup.genchunk(fname)
1447 1447 lookup = lookuprevlink_func(filerevlog)
1448 1448 for chnk in filerevlog.group(nodeiter, lookup):
1449 1449 yield chnk
1450 1450
1451 1451 yield changegroup.closechunk()
1452 1452
1453 1453 if nodes:
1454 1454 self.hook('outgoing', node=hex(nodes[0]), source=source)
1455 1455
1456 1456 return util.chunkbuffer(gengroup())
1457 1457
1458 1458 def addchangegroup(self, source, srctype):
1459 1459 """add changegroup to repo.
1460 1460 returns number of heads modified or added + 1."""
1461 1461
1462 1462 def csmap(x):
1463 1463 self.ui.debug(_("add changeset %s\n") % short(x))
1464 1464 return cl.count()
1465 1465
1466 1466 def revmap(x):
1467 1467 return cl.rev(x)
1468 1468
1469 1469 if not source:
1470 1470 return 0
1471 1471
1472 1472 self.hook('prechangegroup', throw=True, source=srctype)
1473 1473
1474 1474 changesets = files = revisions = 0
1475 1475
1476 1476 tr = self.transaction()
1477 1477
1478 1478 # write changelog and manifest data to temp files so
1479 1479 # concurrent readers will not see inconsistent view
1480 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1480 cl = None
1481 try:
1482 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1481 1483
1482 oldheads = len(cl.heads())
1484 oldheads = len(cl.heads())
1483 1485
1484 # pull off the changeset group
1485 self.ui.status(_("adding changesets\n"))
1486 co = cl.tip()
1487 chunkiter = changegroup.chunkiter(source)
1488 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1489 cnr, cor = map(cl.rev, (cn, co))
1490 if cn == nullid:
1491 cnr = cor
1492 changesets = cnr - cor
1486 # pull off the changeset group
1487 self.ui.status(_("adding changesets\n"))
1488 co = cl.tip()
1489 chunkiter = changegroup.chunkiter(source)
1490 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1491 cnr, cor = map(cl.rev, (cn, co))
1492 if cn == nullid:
1493 cnr = cor
1494 changesets = cnr - cor
1493 1495
1494 mf = appendfile.appendmanifest(self.opener, self.manifest.version)
1496 mf = None
1497 try:
1498 mf = appendfile.appendmanifest(self.opener,
1499 self.manifest.version)
1495 1500
1496 # pull off the manifest group
1497 self.ui.status(_("adding manifests\n"))
1498 mm = mf.tip()
1499 chunkiter = changegroup.chunkiter(source)
1500 mo = mf.addgroup(chunkiter, revmap, tr)
1501 # pull off the manifest group
1502 self.ui.status(_("adding manifests\n"))
1503 mm = mf.tip()
1504 chunkiter = changegroup.chunkiter(source)
1505 mo = mf.addgroup(chunkiter, revmap, tr)
1501 1506
1502 # process the files
1503 self.ui.status(_("adding file changes\n"))
1504 while 1:
1505 f = changegroup.getchunk(source)
1506 if not f:
1507 break
1508 self.ui.debug(_("adding %s revisions\n") % f)
1509 fl = self.file(f)
1510 o = fl.count()
1511 chunkiter = changegroup.chunkiter(source)
1512 n = fl.addgroup(chunkiter, revmap, tr)
1513 revisions += fl.count() - o
1514 files += 1
1507 # process the files
1508 self.ui.status(_("adding file changes\n"))
1509 while 1:
1510 f = changegroup.getchunk(source)
1511 if not f:
1512 break
1513 self.ui.debug(_("adding %s revisions\n") % f)
1514 fl = self.file(f)
1515 o = fl.count()
1516 chunkiter = changegroup.chunkiter(source)
1517 n = fl.addgroup(chunkiter, revmap, tr)
1518 revisions += fl.count() - o
1519 files += 1
1515 1520
1516 # write order here is important so concurrent readers will see
1517 # consistent view of repo
1518 mf.writedata()
1519 cl.writedata()
1521 # write order here is important so concurrent readers will see
1522 # consistent view of repo
1523 mf.writedata()
1524 finally:
1525 if mf:
1526 mf.cleanup()
1527 cl.writedata()
1528 finally:
1529 if cl:
1530 cl.cleanup()
1520 1531
1521 1532 # make changelog and manifest see real files again
1522 1533 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1523 1534 self.manifest = manifest.manifest(self.opener, self.manifest.version)
1524 1535 self.changelog.checkinlinesize(tr)
1525 1536 self.manifest.checkinlinesize(tr)
1526 1537
1527 1538 newheads = len(self.changelog.heads())
1528 1539 heads = ""
1529 1540 if oldheads and newheads > oldheads:
1530 1541 heads = _(" (+%d heads)") % (newheads - oldheads)
1531 1542
1532 1543 self.ui.status(_("added %d changesets"
1533 1544 " with %d changes to %d files%s\n")
1534 1545 % (changesets, revisions, files, heads))
1535 1546
1536 1547 self.hook('pretxnchangegroup', throw=True,
1537 1548 node=hex(self.changelog.node(cor+1)), source=srctype)
1538 1549
1539 1550 tr.close()
1540 1551
1541 1552 if changesets > 0:
1542 1553 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1543 1554 source=srctype)
1544 1555
1545 1556 for i in range(cor + 1, cnr + 1):
1546 1557 self.hook("incoming", node=hex(self.changelog.node(i)),
1547 1558 source=srctype)
1548 1559
1549 1560 return newheads - oldheads + 1
1550 1561
1551 1562 def update(self, node, allow=False, force=False, choose=None,
1552 1563 moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
1553 1564 pl = self.dirstate.parents()
1554 1565 if not force and pl[1] != nullid:
1555 1566 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1556 1567 return 1
1557 1568
1558 1569 err = False
1559 1570
1560 1571 p1, p2 = pl[0], node
1561 1572 pa = self.changelog.ancestor(p1, p2)
1562 1573 m1n = self.changelog.read(p1)[0]
1563 1574 m2n = self.changelog.read(p2)[0]
1564 1575 man = self.manifest.ancestor(m1n, m2n)
1565 1576 m1 = self.manifest.read(m1n)
1566 1577 mf1 = self.manifest.readflags(m1n)
1567 1578 m2 = self.manifest.read(m2n).copy()
1568 1579 mf2 = self.manifest.readflags(m2n)
1569 1580 ma = self.manifest.read(man)
1570 1581 mfa = self.manifest.readflags(man)
1571 1582
1572 1583 modified, added, removed, deleted, unknown = self.changes()
1573 1584
1574 1585 # is this a jump, or a merge? i.e. is there a linear path
1575 1586 # from p1 to p2?
1576 1587 linear_path = (pa == p1 or pa == p2)
1577 1588
1578 1589 if allow and linear_path:
1579 1590 raise util.Abort(_("there is nothing to merge, "
1580 1591 "just use 'hg update'"))
1581 1592 if allow and not forcemerge:
1582 1593 if modified or added or removed:
1583 1594 raise util.Abort(_("outstanding uncommitted changes"))
1584 1595 if not forcemerge and not force:
1585 1596 for f in unknown:
1586 1597 if f in m2:
1587 1598 t1 = self.wread(f)
1588 1599 t2 = self.file(f).read(m2[f])
1589 1600 if cmp(t1, t2) != 0:
1590 1601 raise util.Abort(_("'%s' already exists in the working"
1591 1602 " dir and differs from remote") % f)
1592 1603
1593 1604 # resolve the manifest to determine which files
1594 1605 # we care about merging
1595 1606 self.ui.note(_("resolving manifests\n"))
1596 1607 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1597 1608 (force, allow, moddirstate, linear_path))
1598 1609 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1599 1610 (short(man), short(m1n), short(m2n)))
1600 1611
1601 1612 merge = {}
1602 1613 get = {}
1603 1614 remove = []
1604 1615
1605 1616 # construct a working dir manifest
1606 1617 mw = m1.copy()
1607 1618 mfw = mf1.copy()
1608 1619 umap = dict.fromkeys(unknown)
1609 1620
1610 1621 for f in added + modified + unknown:
1611 1622 mw[f] = ""
1612 1623 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1613 1624
1614 1625 if moddirstate and not wlock:
1615 1626 wlock = self.wlock()
1616 1627
1617 1628 for f in deleted + removed:
1618 1629 if f in mw:
1619 1630 del mw[f]
1620 1631
1621 1632 # If we're jumping between revisions (as opposed to merging),
1622 1633 # and if neither the working directory nor the target rev has
1623 1634 # the file, then we need to remove it from the dirstate, to
1624 1635 # prevent the dirstate from listing the file when it is no
1625 1636 # longer in the manifest.
1626 1637 if moddirstate and linear_path and f not in m2:
1627 1638 self.dirstate.forget((f,))
1628 1639
1629 1640 # Compare manifests
1630 1641 for f, n in mw.iteritems():
1631 1642 if choose and not choose(f):
1632 1643 continue
1633 1644 if f in m2:
1634 1645 s = 0
1635 1646
1636 1647 # is the wfile new since m1, and match m2?
1637 1648 if f not in m1:
1638 1649 t1 = self.wread(f)
1639 1650 t2 = self.file(f).read(m2[f])
1640 1651 if cmp(t1, t2) == 0:
1641 1652 n = m2[f]
1642 1653 del t1, t2
1643 1654
1644 1655 # are files different?
1645 1656 if n != m2[f]:
1646 1657 a = ma.get(f, nullid)
1647 1658 # are both different from the ancestor?
1648 1659 if n != a and m2[f] != a:
1649 1660 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1650 1661 # merge executable bits
1651 1662 # "if we changed or they changed, change in merge"
1652 1663 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1653 1664 mode = ((a^b) | (a^c)) ^ a
1654 1665 merge[f] = (m1.get(f, nullid), m2[f], mode)
1655 1666 s = 1
1656 1667 # are we clobbering?
1657 1668 # is remote's version newer?
1658 1669 # or are we going back in time?
1659 1670 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1660 1671 self.ui.debug(_(" remote %s is newer, get\n") % f)
1661 1672 get[f] = m2[f]
1662 1673 s = 1
1663 1674 elif f in umap or f in added:
1664 1675 # this unknown file is the same as the checkout
1665 1676 # we need to reset the dirstate if the file was added
1666 1677 get[f] = m2[f]
1667 1678
1668 1679 if not s and mfw[f] != mf2[f]:
1669 1680 if force:
1670 1681 self.ui.debug(_(" updating permissions for %s\n") % f)
1671 1682 util.set_exec(self.wjoin(f), mf2[f])
1672 1683 else:
1673 1684 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1674 1685 mode = ((a^b) | (a^c)) ^ a
1675 1686 if mode != b:
1676 1687 self.ui.debug(_(" updating permissions for %s\n")
1677 1688 % f)
1678 1689 util.set_exec(self.wjoin(f), mode)
1679 1690 del m2[f]
1680 1691 elif f in ma:
1681 1692 if n != ma[f]:
1682 1693 r = _("d")
1683 1694 if not force and (linear_path or allow):
1684 1695 r = self.ui.prompt(
1685 1696 (_(" local changed %s which remote deleted\n") % f) +
1686 1697 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1687 1698 if r == _("d"):
1688 1699 remove.append(f)
1689 1700 else:
1690 1701 self.ui.debug(_("other deleted %s\n") % f)
1691 1702 remove.append(f) # other deleted it
1692 1703 else:
1693 1704 # file is created on branch or in working directory
1694 1705 if force and f not in umap:
1695 1706 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1696 1707 remove.append(f)
1697 1708 elif n == m1.get(f, nullid): # same as parent
1698 1709 if p2 == pa: # going backwards?
1699 1710 self.ui.debug(_("remote deleted %s\n") % f)
1700 1711 remove.append(f)
1701 1712 else:
1702 1713 self.ui.debug(_("local modified %s, keeping\n") % f)
1703 1714 else:
1704 1715 self.ui.debug(_("working dir created %s, keeping\n") % f)
1705 1716
1706 1717 for f, n in m2.iteritems():
1707 1718 if choose and not choose(f):
1708 1719 continue
1709 1720 if f[0] == "/":
1710 1721 continue
1711 1722 if f in ma and n != ma[f]:
1712 1723 r = _("k")
1713 1724 if not force and (linear_path or allow):
1714 1725 r = self.ui.prompt(
1715 1726 (_("remote changed %s which local deleted\n") % f) +
1716 1727 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1717 1728 if r == _("k"):
1718 1729 get[f] = n
1719 1730 elif f not in ma:
1720 1731 self.ui.debug(_("remote created %s\n") % f)
1721 1732 get[f] = n
1722 1733 else:
1723 1734 if force or p2 == pa: # going backwards?
1724 1735 self.ui.debug(_("local deleted %s, recreating\n") % f)
1725 1736 get[f] = n
1726 1737 else:
1727 1738 self.ui.debug(_("local deleted %s\n") % f)
1728 1739
1729 1740 del mw, m1, m2, ma
1730 1741
1731 1742 if force:
1732 1743 for f in merge:
1733 1744 get[f] = merge[f][1]
1734 1745 merge = {}
1735 1746
1736 1747 if linear_path or force:
1737 1748 # we don't need to do any magic, just jump to the new rev
1738 1749 branch_merge = False
1739 1750 p1, p2 = p2, nullid
1740 1751 else:
1741 1752 if not allow:
1742 1753 self.ui.status(_("this update spans a branch"
1743 1754 " affecting the following files:\n"))
1744 1755 fl = merge.keys() + get.keys()
1745 1756 fl.sort()
1746 1757 for f in fl:
1747 1758 cf = ""
1748 1759 if f in merge:
1749 1760 cf = _(" (resolve)")
1750 1761 self.ui.status(" %s%s\n" % (f, cf))
1751 1762 self.ui.warn(_("aborting update spanning branches!\n"))
1752 1763 self.ui.status(_("(use 'hg merge' to merge across branches"
1753 1764 " or 'hg update -C' to lose changes)\n"))
1754 1765 return 1
1755 1766 branch_merge = True
1756 1767
1757 1768 # get the files we don't need to change
1758 1769 files = get.keys()
1759 1770 files.sort()
1760 1771 for f in files:
1761 1772 if f[0] == "/":
1762 1773 continue
1763 1774 self.ui.note(_("getting %s\n") % f)
1764 1775 t = self.file(f).read(get[f])
1765 1776 self.wwrite(f, t)
1766 1777 util.set_exec(self.wjoin(f), mf2[f])
1767 1778 if moddirstate:
1768 1779 if branch_merge:
1769 1780 self.dirstate.update([f], 'n', st_mtime=-1)
1770 1781 else:
1771 1782 self.dirstate.update([f], 'n')
1772 1783
1773 1784 # merge the tricky bits
1774 1785 failedmerge = []
1775 1786 files = merge.keys()
1776 1787 files.sort()
1777 1788 xp1 = hex(p1)
1778 1789 xp2 = hex(p2)
1779 1790 for f in files:
1780 1791 self.ui.status(_("merging %s\n") % f)
1781 1792 my, other, flag = merge[f]
1782 1793 ret = self.merge3(f, my, other, xp1, xp2)
1783 1794 if ret:
1784 1795 err = True
1785 1796 failedmerge.append(f)
1786 1797 util.set_exec(self.wjoin(f), flag)
1787 1798 if moddirstate:
1788 1799 if branch_merge:
1789 1800 # We've done a branch merge, mark this file as merged
1790 1801 # so that we properly record the merger later
1791 1802 self.dirstate.update([f], 'm')
1792 1803 else:
1793 1804 # We've update-merged a locally modified file, so
1794 1805 # we set the dirstate to emulate a normal checkout
1795 1806 # of that file some time in the past. Thus our
1796 1807 # merge will appear as a normal local file
1797 1808 # modification.
1798 1809 f_len = len(self.file(f).read(other))
1799 1810 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1800 1811
1801 1812 remove.sort()
1802 1813 for f in remove:
1803 1814 self.ui.note(_("removing %s\n") % f)
1804 1815 util.audit_path(f)
1805 1816 try:
1806 1817 util.unlink(self.wjoin(f))
1807 1818 except OSError, inst:
1808 1819 if inst.errno != errno.ENOENT:
1809 1820 self.ui.warn(_("update failed to remove %s: %s!\n") %
1810 1821 (f, inst.strerror))
1811 1822 if moddirstate:
1812 1823 if branch_merge:
1813 1824 self.dirstate.update(remove, 'r')
1814 1825 else:
1815 1826 self.dirstate.forget(remove)
1816 1827
1817 1828 if moddirstate:
1818 1829 self.dirstate.setparents(p1, p2)
1819 1830
1820 1831 if show_stats:
1821 1832 stats = ((len(get), _("updated")),
1822 1833 (len(merge) - len(failedmerge), _("merged")),
1823 1834 (len(remove), _("removed")),
1824 1835 (len(failedmerge), _("unresolved")))
1825 1836 note = ", ".join([_("%d files %s") % s for s in stats])
1826 1837 self.ui.status("%s\n" % note)
1827 1838 if moddirstate:
1828 1839 if branch_merge:
1829 1840 if failedmerge:
1830 1841 self.ui.status(_("There are unresolved merges,"
1831 1842 " you can redo the full merge using:\n"
1832 1843 " hg update -C %s\n"
1833 1844 " hg merge %s\n"
1834 1845 % (self.changelog.rev(p1),
1835 1846 self.changelog.rev(p2))))
1836 1847 else:
1837 1848 self.ui.status(_("(branch merge, don't forget to commit)\n"))
1838 1849 elif failedmerge:
1839 1850 self.ui.status(_("There are unresolved merges with"
1840 1851 " locally modified files.\n"))
1841 1852
1842 1853 return err
1843 1854
1844 1855 def merge3(self, fn, my, other, p1, p2):
1845 1856 """perform a 3-way merge in the working directory"""
1846 1857
1847 1858 def temp(prefix, node):
1848 1859 pre = "%s~%s." % (os.path.basename(fn), prefix)
1849 1860 (fd, name) = tempfile.mkstemp(prefix=pre)
1850 1861 f = os.fdopen(fd, "wb")
1851 1862 self.wwrite(fn, fl.read(node), f)
1852 1863 f.close()
1853 1864 return name
1854 1865
1855 1866 fl = self.file(fn)
1856 1867 base = fl.ancestor(my, other)
1857 1868 a = self.wjoin(fn)
1858 1869 b = temp("base", base)
1859 1870 c = temp("other", other)
1860 1871
1861 1872 self.ui.note(_("resolving %s\n") % fn)
1862 1873 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1863 1874 (fn, short(my), short(other), short(base)))
1864 1875
1865 1876 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1866 1877 or "hgmerge")
1867 1878 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1868 1879 environ={'HG_FILE': fn,
1869 1880 'HG_MY_NODE': p1,
1870 1881 'HG_OTHER_NODE': p2,
1871 1882 'HG_FILE_MY_NODE': hex(my),
1872 1883 'HG_FILE_OTHER_NODE': hex(other),
1873 1884 'HG_FILE_BASE_NODE': hex(base)})
1874 1885 if r:
1875 1886 self.ui.warn(_("merging %s failed!\n") % fn)
1876 1887
1877 1888 os.unlink(b)
1878 1889 os.unlink(c)
1879 1890 return r
1880 1891
1881 1892 def verify(self):
1882 1893 filelinkrevs = {}
1883 1894 filenodes = {}
1884 1895 changesets = revisions = files = 0
1885 1896 errors = [0]
1886 1897 warnings = [0]
1887 1898 neededmanifests = {}
1888 1899
1889 1900 def err(msg):
1890 1901 self.ui.warn(msg + "\n")
1891 1902 errors[0] += 1
1892 1903
1893 1904 def warn(msg):
1894 1905 self.ui.warn(msg + "\n")
1895 1906 warnings[0] += 1
1896 1907
1897 1908 def checksize(obj, name):
1898 1909 d = obj.checksize()
1899 1910 if d[0]:
1900 1911 err(_("%s data length off by %d bytes") % (name, d[0]))
1901 1912 if d[1]:
1902 1913 err(_("%s index contains %d extra bytes") % (name, d[1]))
1903 1914
1904 1915 def checkversion(obj, name):
1905 1916 if obj.version != revlog.REVLOGV0:
1906 1917 if not revlogv1:
1907 1918 warn(_("warning: `%s' uses revlog format 1") % name)
1908 1919 elif revlogv1:
1909 1920 warn(_("warning: `%s' uses revlog format 0") % name)
1910 1921
1911 1922 revlogv1 = self.revlogversion != revlog.REVLOGV0
1912 1923 if self.ui.verbose or revlogv1 != self.revlogv1:
1913 1924 self.ui.status(_("repository uses revlog format %d\n") %
1914 1925 (revlogv1 and 1 or 0))
1915 1926
1916 1927 seen = {}
1917 1928 self.ui.status(_("checking changesets\n"))
1918 1929 checksize(self.changelog, "changelog")
1919 1930
1920 1931 for i in range(self.changelog.count()):
1921 1932 changesets += 1
1922 1933 n = self.changelog.node(i)
1923 1934 l = self.changelog.linkrev(n)
1924 1935 if l != i:
1925 1936 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1926 1937 if n in seen:
1927 1938 err(_("duplicate changeset at revision %d") % i)
1928 1939 seen[n] = 1
1929 1940
1930 1941 for p in self.changelog.parents(n):
1931 1942 if p not in self.changelog.nodemap:
1932 1943 err(_("changeset %s has unknown parent %s") %
1933 1944 (short(n), short(p)))
1934 1945 try:
1935 1946 changes = self.changelog.read(n)
1936 1947 except KeyboardInterrupt:
1937 1948 self.ui.warn(_("interrupted"))
1938 1949 raise
1939 1950 except Exception, inst:
1940 1951 err(_("unpacking changeset %s: %s") % (short(n), inst))
1941 1952 continue
1942 1953
1943 1954 neededmanifests[changes[0]] = n
1944 1955
1945 1956 for f in changes[3]:
1946 1957 filelinkrevs.setdefault(f, []).append(i)
1947 1958
1948 1959 seen = {}
1949 1960 self.ui.status(_("checking manifests\n"))
1950 1961 checkversion(self.manifest, "manifest")
1951 1962 checksize(self.manifest, "manifest")
1952 1963
1953 1964 for i in range(self.manifest.count()):
1954 1965 n = self.manifest.node(i)
1955 1966 l = self.manifest.linkrev(n)
1956 1967
1957 1968 if l < 0 or l >= self.changelog.count():
1958 1969 err(_("bad manifest link (%d) at revision %d") % (l, i))
1959 1970
1960 1971 if n in neededmanifests:
1961 1972 del neededmanifests[n]
1962 1973
1963 1974 if n in seen:
1964 1975 err(_("duplicate manifest at revision %d") % i)
1965 1976
1966 1977 seen[n] = 1
1967 1978
1968 1979 for p in self.manifest.parents(n):
1969 1980 if p not in self.manifest.nodemap:
1970 1981 err(_("manifest %s has unknown parent %s") %
1971 1982 (short(n), short(p)))
1972 1983
1973 1984 try:
1974 1985 delta = mdiff.patchtext(self.manifest.delta(n))
1975 1986 except KeyboardInterrupt:
1976 1987 self.ui.warn(_("interrupted"))
1977 1988 raise
1978 1989 except Exception, inst:
1979 1990 err(_("unpacking manifest %s: %s") % (short(n), inst))
1980 1991 continue
1981 1992
1982 1993 try:
1983 1994 ff = [ l.split('\0') for l in delta.splitlines() ]
1984 1995 for f, fn in ff:
1985 1996 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1986 1997 except (ValueError, TypeError), inst:
1987 1998 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1988 1999
1989 2000 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1990 2001
1991 2002 for m, c in neededmanifests.items():
1992 2003 err(_("Changeset %s refers to unknown manifest %s") %
1993 2004 (short(m), short(c)))
1994 2005 del neededmanifests
1995 2006
1996 2007 for f in filenodes:
1997 2008 if f not in filelinkrevs:
1998 2009 err(_("file %s in manifest but not in changesets") % f)
1999 2010
2000 2011 for f in filelinkrevs:
2001 2012 if f not in filenodes:
2002 2013 err(_("file %s in changeset but not in manifest") % f)
2003 2014
2004 2015 self.ui.status(_("checking files\n"))
2005 2016 ff = filenodes.keys()
2006 2017 ff.sort()
2007 2018 for f in ff:
2008 2019 if f == "/dev/null":
2009 2020 continue
2010 2021 files += 1
2011 2022 if not f:
2012 2023 err(_("file without name in manifest %s") % short(n))
2013 2024 continue
2014 2025 fl = self.file(f)
2015 2026 checkversion(fl, f)
2016 2027 checksize(fl, f)
2017 2028
2018 2029 nodes = {nullid: 1}
2019 2030 seen = {}
2020 2031 for i in range(fl.count()):
2021 2032 revisions += 1
2022 2033 n = fl.node(i)
2023 2034
2024 2035 if n in seen:
2025 2036 err(_("%s: duplicate revision %d") % (f, i))
2026 2037 if n not in filenodes[f]:
2027 2038 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2028 2039 else:
2029 2040 del filenodes[f][n]
2030 2041
2031 2042 flr = fl.linkrev(n)
2032 2043 if flr not in filelinkrevs.get(f, []):
2033 2044 err(_("%s:%s points to unexpected changeset %d")
2034 2045 % (f, short(n), flr))
2035 2046 else:
2036 2047 filelinkrevs[f].remove(flr)
2037 2048
2038 2049 # verify contents
2039 2050 try:
2040 2051 t = fl.read(n)
2041 2052 except KeyboardInterrupt:
2042 2053 self.ui.warn(_("interrupted"))
2043 2054 raise
2044 2055 except Exception, inst:
2045 2056 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2046 2057
2047 2058 # verify parents
2048 2059 (p1, p2) = fl.parents(n)
2049 2060 if p1 not in nodes:
2050 2061 err(_("file %s:%s unknown parent 1 %s") %
2051 2062 (f, short(n), short(p1)))
2052 2063 if p2 not in nodes:
2053 2064 err(_("file %s:%s unknown parent 2 %s") %
2054 2065 (f, short(n), short(p1)))
2055 2066 nodes[n] = 1
2056 2067
2057 2068 # cross-check
2058 2069 for node in filenodes[f]:
2059 2070 err(_("node %s in manifests not in %s") % (hex(node), f))
2060 2071
2061 2072 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2062 2073 (files, changesets, revisions))
2063 2074
2064 2075 if warnings[0]:
2065 2076 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2066 2077 if errors[0]:
2067 2078 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2068 2079 return 1
2069 2080
2070 2081 # used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callable that promotes the transaction journal files
    under `base` to their undo counterparts.

    The callback closes over only the path string (not the repo),
    which keeps reference cycles out of the picture so destructors
    run promptly.
    """
    def a():
        for src, dst in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(base, src), os.path.join(base, dst))
    return a
2078 2089
General Comments 0
You need to be logged in to leave comments. Login now