##// END OF EJS Templates
merge util.esystem and util.system.
Vadim Gelfer -
r1882:c0320567 default
parent child Browse files
Show More
@@ -1,1887 +1,1887
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct, os, util
8 import struct, os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14
14
15 class localrepository(object):
15 class localrepository(object):
16 def __del__(self):
16 def __del__(self):
17 self.transhandle = None
17 self.transhandle = None
18 def __init__(self, parentui, path=None, create=0):
18 def __init__(self, parentui, path=None, create=0):
19 if not path:
19 if not path:
20 p = os.getcwd()
20 p = os.getcwd()
21 while not os.path.isdir(os.path.join(p, ".hg")):
21 while not os.path.isdir(os.path.join(p, ".hg")):
22 oldp = p
22 oldp = p
23 p = os.path.dirname(p)
23 p = os.path.dirname(p)
24 if p == oldp:
24 if p == oldp:
25 raise repo.RepoError(_("no repo found"))
25 raise repo.RepoError(_("no repo found"))
26 path = p
26 path = p
27 self.path = os.path.join(path, ".hg")
27 self.path = os.path.join(path, ".hg")
28
28
29 if not create and not os.path.isdir(self.path):
29 if not create and not os.path.isdir(self.path):
30 raise repo.RepoError(_("repository %s not found") % path)
30 raise repo.RepoError(_("repository %s not found") % path)
31
31
32 self.root = os.path.abspath(path)
32 self.root = os.path.abspath(path)
33 self.ui = ui.ui(parentui=parentui)
33 self.ui = ui.ui(parentui=parentui)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.manifest = manifest.manifest(self.opener)
36 self.manifest = manifest.manifest(self.opener)
37 self.changelog = changelog.changelog(self.opener)
37 self.changelog = changelog.changelog(self.opener)
38 self.tagscache = None
38 self.tagscache = None
39 self.nodetagscache = None
39 self.nodetagscache = None
40 self.encodepats = None
40 self.encodepats = None
41 self.decodepats = None
41 self.decodepats = None
42 self.transhandle = None
42 self.transhandle = None
43
43
44 if create:
44 if create:
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 os.mkdir(self.join("data"))
46 os.mkdir(self.join("data"))
47
47
48 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
48 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
49 try:
49 try:
50 self.ui.readconfig(self.join("hgrc"))
50 self.ui.readconfig(self.join("hgrc"))
51 except IOError:
51 except IOError:
52 pass
52 pass
53
53
54 def hook(self, name, throw=False, **args):
54 def hook(self, name, throw=False, **args):
55 def runhook(name, cmd):
55 def runhook(name, cmd):
56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
57 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
57 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
58 r = util.esystem(cmd, environ=env, cwd=self.root)
58 r = util.system(cmd, environ=env, cwd=self.root)
59 if r:
59 if r:
60 desc, r = util.explain_exit(r)
60 desc, r = util.explain_exit(r)
61 if throw:
61 if throw:
62 raise util.Abort(_('%s hook %s') % (name, desc))
62 raise util.Abort(_('%s hook %s') % (name, desc))
63 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
63 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
64 return False
64 return False
65 return True
65 return True
66
66
67 r = True
67 r = True
68 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
68 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
69 if hname.split(".", 1)[0] == name and cmd]
69 if hname.split(".", 1)[0] == name and cmd]
70 hooks.sort()
70 hooks.sort()
71 for hname, cmd in hooks:
71 for hname, cmd in hooks:
72 r = runhook(hname, cmd) and r
72 r = runhook(hname, cmd) and r
73 return r
73 return r
74
74
75 def tags(self):
75 def tags(self):
76 '''return a mapping of tag to node'''
76 '''return a mapping of tag to node'''
77 if not self.tagscache:
77 if not self.tagscache:
78 self.tagscache = {}
78 self.tagscache = {}
79 def addtag(self, k, n):
79 def addtag(self, k, n):
80 try:
80 try:
81 bin_n = bin(n)
81 bin_n = bin(n)
82 except TypeError:
82 except TypeError:
83 bin_n = ''
83 bin_n = ''
84 self.tagscache[k.strip()] = bin_n
84 self.tagscache[k.strip()] = bin_n
85
85
86 try:
86 try:
87 # read each head of the tags file, ending with the tip
87 # read each head of the tags file, ending with the tip
88 # and add each tag found to the map, with "newer" ones
88 # and add each tag found to the map, with "newer" ones
89 # taking precedence
89 # taking precedence
90 fl = self.file(".hgtags")
90 fl = self.file(".hgtags")
91 h = fl.heads()
91 h = fl.heads()
92 h.reverse()
92 h.reverse()
93 for r in h:
93 for r in h:
94 for l in fl.read(r).splitlines():
94 for l in fl.read(r).splitlines():
95 if l:
95 if l:
96 n, k = l.split(" ", 1)
96 n, k = l.split(" ", 1)
97 addtag(self, k, n)
97 addtag(self, k, n)
98 except KeyError:
98 except KeyError:
99 pass
99 pass
100
100
101 try:
101 try:
102 f = self.opener("localtags")
102 f = self.opener("localtags")
103 for l in f:
103 for l in f:
104 n, k = l.split(" ", 1)
104 n, k = l.split(" ", 1)
105 addtag(self, k, n)
105 addtag(self, k, n)
106 except IOError:
106 except IOError:
107 pass
107 pass
108
108
109 self.tagscache['tip'] = self.changelog.tip()
109 self.tagscache['tip'] = self.changelog.tip()
110
110
111 return self.tagscache
111 return self.tagscache
112
112
113 def tagslist(self):
113 def tagslist(self):
114 '''return a list of tags ordered by revision'''
114 '''return a list of tags ordered by revision'''
115 l = []
115 l = []
116 for t, n in self.tags().items():
116 for t, n in self.tags().items():
117 try:
117 try:
118 r = self.changelog.rev(n)
118 r = self.changelog.rev(n)
119 except:
119 except:
120 r = -2 # sort to the beginning of the list if unknown
120 r = -2 # sort to the beginning of the list if unknown
121 l.append((r, t, n))
121 l.append((r, t, n))
122 l.sort()
122 l.sort()
123 return [(t, n) for r, t, n in l]
123 return [(t, n) for r, t, n in l]
124
124
125 def nodetags(self, node):
125 def nodetags(self, node):
126 '''return the tags associated with a node'''
126 '''return the tags associated with a node'''
127 if not self.nodetagscache:
127 if not self.nodetagscache:
128 self.nodetagscache = {}
128 self.nodetagscache = {}
129 for t, n in self.tags().items():
129 for t, n in self.tags().items():
130 self.nodetagscache.setdefault(n, []).append(t)
130 self.nodetagscache.setdefault(n, []).append(t)
131 return self.nodetagscache.get(node, [])
131 return self.nodetagscache.get(node, [])
132
132
133 def lookup(self, key):
133 def lookup(self, key):
134 try:
134 try:
135 return self.tags()[key]
135 return self.tags()[key]
136 except KeyError:
136 except KeyError:
137 try:
137 try:
138 return self.changelog.lookup(key)
138 return self.changelog.lookup(key)
139 except:
139 except:
140 raise repo.RepoError(_("unknown revision '%s'") % key)
140 raise repo.RepoError(_("unknown revision '%s'") % key)
141
141
142 def dev(self):
142 def dev(self):
143 return os.stat(self.path).st_dev
143 return os.stat(self.path).st_dev
144
144
145 def local(self):
145 def local(self):
146 return True
146 return True
147
147
148 def join(self, f):
148 def join(self, f):
149 return os.path.join(self.path, f)
149 return os.path.join(self.path, f)
150
150
151 def wjoin(self, f):
151 def wjoin(self, f):
152 return os.path.join(self.root, f)
152 return os.path.join(self.root, f)
153
153
154 def file(self, f):
154 def file(self, f):
155 if f[0] == '/':
155 if f[0] == '/':
156 f = f[1:]
156 f = f[1:]
157 return filelog.filelog(self.opener, f)
157 return filelog.filelog(self.opener, f)
158
158
159 def getcwd(self):
159 def getcwd(self):
160 return self.dirstate.getcwd()
160 return self.dirstate.getcwd()
161
161
162 def wfile(self, f, mode='r'):
162 def wfile(self, f, mode='r'):
163 return self.wopener(f, mode)
163 return self.wopener(f, mode)
164
164
165 def wread(self, filename):
165 def wread(self, filename):
166 if self.encodepats == None:
166 if self.encodepats == None:
167 l = []
167 l = []
168 for pat, cmd in self.ui.configitems("encode"):
168 for pat, cmd in self.ui.configitems("encode"):
169 mf = util.matcher("", "/", [pat], [], [])[1]
169 mf = util.matcher("", "/", [pat], [], [])[1]
170 l.append((mf, cmd))
170 l.append((mf, cmd))
171 self.encodepats = l
171 self.encodepats = l
172
172
173 data = self.wopener(filename, 'r').read()
173 data = self.wopener(filename, 'r').read()
174
174
175 for mf, cmd in self.encodepats:
175 for mf, cmd in self.encodepats:
176 if mf(filename):
176 if mf(filename):
177 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
177 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
178 data = util.filter(data, cmd)
178 data = util.filter(data, cmd)
179 break
179 break
180
180
181 return data
181 return data
182
182
183 def wwrite(self, filename, data, fd=None):
183 def wwrite(self, filename, data, fd=None):
184 if self.decodepats == None:
184 if self.decodepats == None:
185 l = []
185 l = []
186 for pat, cmd in self.ui.configitems("decode"):
186 for pat, cmd in self.ui.configitems("decode"):
187 mf = util.matcher("", "/", [pat], [], [])[1]
187 mf = util.matcher("", "/", [pat], [], [])[1]
188 l.append((mf, cmd))
188 l.append((mf, cmd))
189 self.decodepats = l
189 self.decodepats = l
190
190
191 for mf, cmd in self.decodepats:
191 for mf, cmd in self.decodepats:
192 if mf(filename):
192 if mf(filename):
193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
194 data = util.filter(data, cmd)
194 data = util.filter(data, cmd)
195 break
195 break
196
196
197 if fd:
197 if fd:
198 return fd.write(data)
198 return fd.write(data)
199 return self.wopener(filename, 'w').write(data)
199 return self.wopener(filename, 'w').write(data)
200
200
201 def transaction(self):
201 def transaction(self):
202 tr = self.transhandle
202 tr = self.transhandle
203 if tr != None and tr.running():
203 if tr != None and tr.running():
204 return tr.nest()
204 return tr.nest()
205
205
206 # save dirstate for undo
206 # save dirstate for undo
207 try:
207 try:
208 ds = self.opener("dirstate").read()
208 ds = self.opener("dirstate").read()
209 except IOError:
209 except IOError:
210 ds = ""
210 ds = ""
211 self.opener("journal.dirstate", "w").write(ds)
211 self.opener("journal.dirstate", "w").write(ds)
212
212
213 tr = transaction.transaction(self.ui.warn, self.opener,
213 tr = transaction.transaction(self.ui.warn, self.opener,
214 self.join("journal"),
214 self.join("journal"),
215 aftertrans(self.path))
215 aftertrans(self.path))
216 self.transhandle = tr
216 self.transhandle = tr
217 return tr
217 return tr
218
218
219 def recover(self):
219 def recover(self):
220 l = self.lock()
220 l = self.lock()
221 if os.path.exists(self.join("journal")):
221 if os.path.exists(self.join("journal")):
222 self.ui.status(_("rolling back interrupted transaction\n"))
222 self.ui.status(_("rolling back interrupted transaction\n"))
223 transaction.rollback(self.opener, self.join("journal"))
223 transaction.rollback(self.opener, self.join("journal"))
224 self.reload()
224 self.reload()
225 return True
225 return True
226 else:
226 else:
227 self.ui.warn(_("no interrupted transaction available\n"))
227 self.ui.warn(_("no interrupted transaction available\n"))
228 return False
228 return False
229
229
230 def undo(self, wlock=None):
230 def undo(self, wlock=None):
231 if not wlock:
231 if not wlock:
232 wlock = self.wlock()
232 wlock = self.wlock()
233 l = self.lock()
233 l = self.lock()
234 if os.path.exists(self.join("undo")):
234 if os.path.exists(self.join("undo")):
235 self.ui.status(_("rolling back last transaction\n"))
235 self.ui.status(_("rolling back last transaction\n"))
236 transaction.rollback(self.opener, self.join("undo"))
236 transaction.rollback(self.opener, self.join("undo"))
237 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
237 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
238 self.reload()
238 self.reload()
239 self.wreload()
239 self.wreload()
240 else:
240 else:
241 self.ui.warn(_("no undo information available\n"))
241 self.ui.warn(_("no undo information available\n"))
242
242
243 def wreload(self):
243 def wreload(self):
244 self.dirstate.read()
244 self.dirstate.read()
245
245
246 def reload(self):
246 def reload(self):
247 self.changelog.load()
247 self.changelog.load()
248 self.manifest.load()
248 self.manifest.load()
249 self.tagscache = None
249 self.tagscache = None
250 self.nodetagscache = None
250 self.nodetagscache = None
251
251
252 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
252 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
253 try:
253 try:
254 l = lock.lock(self.join(lockname), 0, releasefn)
254 l = lock.lock(self.join(lockname), 0, releasefn)
255 except lock.LockHeld, inst:
255 except lock.LockHeld, inst:
256 if not wait:
256 if not wait:
257 raise inst
257 raise inst
258 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
258 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
259 try:
259 try:
260 # default to 600 seconds timeout
260 # default to 600 seconds timeout
261 l = lock.lock(self.join(lockname),
261 l = lock.lock(self.join(lockname),
262 int(self.ui.config("ui", "timeout") or 600),
262 int(self.ui.config("ui", "timeout") or 600),
263 releasefn)
263 releasefn)
264 except lock.LockHeld, inst:
264 except lock.LockHeld, inst:
265 raise util.Abort(_("timeout while waiting for "
265 raise util.Abort(_("timeout while waiting for "
266 "lock held by %s") % inst.args[0])
266 "lock held by %s") % inst.args[0])
267 if acquirefn:
267 if acquirefn:
268 acquirefn()
268 acquirefn()
269 return l
269 return l
270
270
271 def lock(self, wait=1):
271 def lock(self, wait=1):
272 return self.do_lock("lock", wait, acquirefn=self.reload)
272 return self.do_lock("lock", wait, acquirefn=self.reload)
273
273
274 def wlock(self, wait=1):
274 def wlock(self, wait=1):
275 return self.do_lock("wlock", wait,
275 return self.do_lock("wlock", wait,
276 self.dirstate.write,
276 self.dirstate.write,
277 self.wreload)
277 self.wreload)
278
278
279 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
279 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
280 "determine whether a new filenode is needed"
280 "determine whether a new filenode is needed"
281 fp1 = manifest1.get(filename, nullid)
281 fp1 = manifest1.get(filename, nullid)
282 fp2 = manifest2.get(filename, nullid)
282 fp2 = manifest2.get(filename, nullid)
283
283
284 if fp2 != nullid:
284 if fp2 != nullid:
285 # is one parent an ancestor of the other?
285 # is one parent an ancestor of the other?
286 fpa = filelog.ancestor(fp1, fp2)
286 fpa = filelog.ancestor(fp1, fp2)
287 if fpa == fp1:
287 if fpa == fp1:
288 fp1, fp2 = fp2, nullid
288 fp1, fp2 = fp2, nullid
289 elif fpa == fp2:
289 elif fpa == fp2:
290 fp2 = nullid
290 fp2 = nullid
291
291
292 # is the file unmodified from the parent? report existing entry
292 # is the file unmodified from the parent? report existing entry
293 if fp2 == nullid and text == filelog.read(fp1):
293 if fp2 == nullid and text == filelog.read(fp1):
294 return (fp1, None, None)
294 return (fp1, None, None)
295
295
296 return (None, fp1, fp2)
296 return (None, fp1, fp2)
297
297
298 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
298 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
299 orig_parent = self.dirstate.parents()[0] or nullid
299 orig_parent = self.dirstate.parents()[0] or nullid
300 p1 = p1 or self.dirstate.parents()[0] or nullid
300 p1 = p1 or self.dirstate.parents()[0] or nullid
301 p2 = p2 or self.dirstate.parents()[1] or nullid
301 p2 = p2 or self.dirstate.parents()[1] or nullid
302 c1 = self.changelog.read(p1)
302 c1 = self.changelog.read(p1)
303 c2 = self.changelog.read(p2)
303 c2 = self.changelog.read(p2)
304 m1 = self.manifest.read(c1[0])
304 m1 = self.manifest.read(c1[0])
305 mf1 = self.manifest.readflags(c1[0])
305 mf1 = self.manifest.readflags(c1[0])
306 m2 = self.manifest.read(c2[0])
306 m2 = self.manifest.read(c2[0])
307 changed = []
307 changed = []
308
308
309 if orig_parent == p1:
309 if orig_parent == p1:
310 update_dirstate = 1
310 update_dirstate = 1
311 else:
311 else:
312 update_dirstate = 0
312 update_dirstate = 0
313
313
314 if not wlock:
314 if not wlock:
315 wlock = self.wlock()
315 wlock = self.wlock()
316 l = self.lock()
316 l = self.lock()
317 tr = self.transaction()
317 tr = self.transaction()
318 mm = m1.copy()
318 mm = m1.copy()
319 mfm = mf1.copy()
319 mfm = mf1.copy()
320 linkrev = self.changelog.count()
320 linkrev = self.changelog.count()
321 for f in files:
321 for f in files:
322 try:
322 try:
323 t = self.wread(f)
323 t = self.wread(f)
324 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
324 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
325 r = self.file(f)
325 r = self.file(f)
326 mfm[f] = tm
326 mfm[f] = tm
327
327
328 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
328 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
329 if entry:
329 if entry:
330 mm[f] = entry
330 mm[f] = entry
331 continue
331 continue
332
332
333 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
333 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
334 changed.append(f)
334 changed.append(f)
335 if update_dirstate:
335 if update_dirstate:
336 self.dirstate.update([f], "n")
336 self.dirstate.update([f], "n")
337 except IOError:
337 except IOError:
338 try:
338 try:
339 del mm[f]
339 del mm[f]
340 del mfm[f]
340 del mfm[f]
341 if update_dirstate:
341 if update_dirstate:
342 self.dirstate.forget([f])
342 self.dirstate.forget([f])
343 except:
343 except:
344 # deleted from p2?
344 # deleted from p2?
345 pass
345 pass
346
346
347 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
347 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
348 user = user or self.ui.username()
348 user = user or self.ui.username()
349 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
349 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
350 tr.close()
350 tr.close()
351 if update_dirstate:
351 if update_dirstate:
352 self.dirstate.setparents(n, nullid)
352 self.dirstate.setparents(n, nullid)
353
353
354 def commit(self, files=None, text="", user=None, date=None,
354 def commit(self, files=None, text="", user=None, date=None,
355 match=util.always, force=False, lock=None, wlock=None):
355 match=util.always, force=False, lock=None, wlock=None):
356 commit = []
356 commit = []
357 remove = []
357 remove = []
358 changed = []
358 changed = []
359
359
360 if files:
360 if files:
361 for f in files:
361 for f in files:
362 s = self.dirstate.state(f)
362 s = self.dirstate.state(f)
363 if s in 'nmai':
363 if s in 'nmai':
364 commit.append(f)
364 commit.append(f)
365 elif s == 'r':
365 elif s == 'r':
366 remove.append(f)
366 remove.append(f)
367 else:
367 else:
368 self.ui.warn(_("%s not tracked!\n") % f)
368 self.ui.warn(_("%s not tracked!\n") % f)
369 else:
369 else:
370 modified, added, removed, deleted, unknown = self.changes(match=match)
370 modified, added, removed, deleted, unknown = self.changes(match=match)
371 commit = modified + added
371 commit = modified + added
372 remove = removed
372 remove = removed
373
373
374 p1, p2 = self.dirstate.parents()
374 p1, p2 = self.dirstate.parents()
375 c1 = self.changelog.read(p1)
375 c1 = self.changelog.read(p1)
376 c2 = self.changelog.read(p2)
376 c2 = self.changelog.read(p2)
377 m1 = self.manifest.read(c1[0])
377 m1 = self.manifest.read(c1[0])
378 mf1 = self.manifest.readflags(c1[0])
378 mf1 = self.manifest.readflags(c1[0])
379 m2 = self.manifest.read(c2[0])
379 m2 = self.manifest.read(c2[0])
380
380
381 if not commit and not remove and not force and p2 == nullid:
381 if not commit and not remove and not force and p2 == nullid:
382 self.ui.status(_("nothing changed\n"))
382 self.ui.status(_("nothing changed\n"))
383 return None
383 return None
384
384
385 xp1 = hex(p1)
385 xp1 = hex(p1)
386 if p2 == nullid: xp2 = ''
386 if p2 == nullid: xp2 = ''
387 else: xp2 = hex(p2)
387 else: xp2 = hex(p2)
388
388
389 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
389 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
390
390
391 if not wlock:
391 if not wlock:
392 wlock = self.wlock()
392 wlock = self.wlock()
393 if not lock:
393 if not lock:
394 lock = self.lock()
394 lock = self.lock()
395 tr = self.transaction()
395 tr = self.transaction()
396
396
397 # check in files
397 # check in files
398 new = {}
398 new = {}
399 linkrev = self.changelog.count()
399 linkrev = self.changelog.count()
400 commit.sort()
400 commit.sort()
401 for f in commit:
401 for f in commit:
402 self.ui.note(f + "\n")
402 self.ui.note(f + "\n")
403 try:
403 try:
404 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
404 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
405 t = self.wread(f)
405 t = self.wread(f)
406 except IOError:
406 except IOError:
407 self.ui.warn(_("trouble committing %s!\n") % f)
407 self.ui.warn(_("trouble committing %s!\n") % f)
408 raise
408 raise
409
409
410 r = self.file(f)
410 r = self.file(f)
411
411
412 meta = {}
412 meta = {}
413 cp = self.dirstate.copied(f)
413 cp = self.dirstate.copied(f)
414 if cp:
414 if cp:
415 meta["copy"] = cp
415 meta["copy"] = cp
416 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
416 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
417 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
417 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
418 fp1, fp2 = nullid, nullid
418 fp1, fp2 = nullid, nullid
419 else:
419 else:
420 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
420 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
421 if entry:
421 if entry:
422 new[f] = entry
422 new[f] = entry
423 continue
423 continue
424
424
425 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
425 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
426 # remember what we've added so that we can later calculate
426 # remember what we've added so that we can later calculate
427 # the files to pull from a set of changesets
427 # the files to pull from a set of changesets
428 changed.append(f)
428 changed.append(f)
429
429
430 # update manifest
430 # update manifest
431 m1 = m1.copy()
431 m1 = m1.copy()
432 m1.update(new)
432 m1.update(new)
433 for f in remove:
433 for f in remove:
434 if f in m1:
434 if f in m1:
435 del m1[f]
435 del m1[f]
436 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
436 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
437 (new, remove))
437 (new, remove))
438
438
439 # add changeset
439 # add changeset
440 new = new.keys()
440 new = new.keys()
441 new.sort()
441 new.sort()
442
442
443 if not text:
443 if not text:
444 edittext = [""]
444 edittext = [""]
445 if p2 != nullid:
445 if p2 != nullid:
446 edittext.append("HG: branch merge")
446 edittext.append("HG: branch merge")
447 edittext.extend(["HG: changed %s" % f for f in changed])
447 edittext.extend(["HG: changed %s" % f for f in changed])
448 edittext.extend(["HG: removed %s" % f for f in remove])
448 edittext.extend(["HG: removed %s" % f for f in remove])
449 if not changed and not remove:
449 if not changed and not remove:
450 edittext.append("HG: no files changed")
450 edittext.append("HG: no files changed")
451 edittext.append("")
451 edittext.append("")
452 # run editor in the repository root
452 # run editor in the repository root
453 olddir = os.getcwd()
453 olddir = os.getcwd()
454 os.chdir(self.root)
454 os.chdir(self.root)
455 edittext = self.ui.edit("\n".join(edittext))
455 edittext = self.ui.edit("\n".join(edittext))
456 os.chdir(olddir)
456 os.chdir(olddir)
457 if not edittext.rstrip():
457 if not edittext.rstrip():
458 return None
458 return None
459 text = edittext
459 text = edittext
460
460
461 user = user or self.ui.username()
461 user = user or self.ui.username()
462 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
462 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
463 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
463 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
464 parent2=xp2)
464 parent2=xp2)
465 tr.close()
465 tr.close()
466
466
467 self.dirstate.setparents(n)
467 self.dirstate.setparents(n)
468 self.dirstate.update(new, "n")
468 self.dirstate.update(new, "n")
469 self.dirstate.forget(remove)
469 self.dirstate.forget(remove)
470
470
471 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
471 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
472 return n
472 return n
473
473
474 def walk(self, node=None, files=[], match=util.always):
474 def walk(self, node=None, files=[], match=util.always):
475 if node:
475 if node:
476 fdict = dict.fromkeys(files)
476 fdict = dict.fromkeys(files)
477 for fn in self.manifest.read(self.changelog.read(node)[0]):
477 for fn in self.manifest.read(self.changelog.read(node)[0]):
478 fdict.pop(fn, None)
478 fdict.pop(fn, None)
479 if match(fn):
479 if match(fn):
480 yield 'm', fn
480 yield 'm', fn
481 for fn in fdict:
481 for fn in fdict:
482 self.ui.warn(_('%s: No such file in rev %s\n') % (
482 self.ui.warn(_('%s: No such file in rev %s\n') % (
483 util.pathto(self.getcwd(), fn), short(node)))
483 util.pathto(self.getcwd(), fn), short(node)))
484 else:
484 else:
485 for src, fn in self.dirstate.walk(files, match):
485 for src, fn in self.dirstate.walk(files, match):
486 yield src, fn
486 yield src, fn
487
487
488 def changes(self, node1=None, node2=None, files=[], match=util.always,
488 def changes(self, node1=None, node2=None, files=[], match=util.always,
489 wlock=None):
489 wlock=None):
490 """return changes between two nodes or node and working directory
490 """return changes between two nodes or node and working directory
491
491
492 If node1 is None, use the first dirstate parent instead.
492 If node1 is None, use the first dirstate parent instead.
493 If node2 is None, compare node1 with working directory.
493 If node2 is None, compare node1 with working directory.
494 """
494 """
495
495
496 def fcmp(fn, mf):
496 def fcmp(fn, mf):
497 t1 = self.wread(fn)
497 t1 = self.wread(fn)
498 t2 = self.file(fn).read(mf.get(fn, nullid))
498 t2 = self.file(fn).read(mf.get(fn, nullid))
499 return cmp(t1, t2)
499 return cmp(t1, t2)
500
500
501 def mfmatches(node):
501 def mfmatches(node):
502 change = self.changelog.read(node)
502 change = self.changelog.read(node)
503 mf = dict(self.manifest.read(change[0]))
503 mf = dict(self.manifest.read(change[0]))
504 for fn in mf.keys():
504 for fn in mf.keys():
505 if not match(fn):
505 if not match(fn):
506 del mf[fn]
506 del mf[fn]
507 return mf
507 return mf
508
508
509 if node1:
509 if node1:
510 # read the manifest from node1 before the manifest from node2,
510 # read the manifest from node1 before the manifest from node2,
511 # so that we'll hit the manifest cache if we're going through
511 # so that we'll hit the manifest cache if we're going through
512 # all the revisions in parent->child order.
512 # all the revisions in parent->child order.
513 mf1 = mfmatches(node1)
513 mf1 = mfmatches(node1)
514
514
515 # are we comparing the working directory?
515 # are we comparing the working directory?
516 if not node2:
516 if not node2:
517 if not wlock:
517 if not wlock:
518 try:
518 try:
519 wlock = self.wlock(wait=0)
519 wlock = self.wlock(wait=0)
520 except lock.LockException:
520 except lock.LockException:
521 wlock = None
521 wlock = None
522 lookup, modified, added, removed, deleted, unknown = (
522 lookup, modified, added, removed, deleted, unknown = (
523 self.dirstate.changes(files, match))
523 self.dirstate.changes(files, match))
524
524
525 # are we comparing working dir against its parent?
525 # are we comparing working dir against its parent?
526 if not node1:
526 if not node1:
527 if lookup:
527 if lookup:
528 # do a full compare of any files that might have changed
528 # do a full compare of any files that might have changed
529 mf2 = mfmatches(self.dirstate.parents()[0])
529 mf2 = mfmatches(self.dirstate.parents()[0])
530 for f in lookup:
530 for f in lookup:
531 if fcmp(f, mf2):
531 if fcmp(f, mf2):
532 modified.append(f)
532 modified.append(f)
533 elif wlock is not None:
533 elif wlock is not None:
534 self.dirstate.update([f], "n")
534 self.dirstate.update([f], "n")
535 else:
535 else:
536 # we are comparing working dir against non-parent
536 # we are comparing working dir against non-parent
537 # generate a pseudo-manifest for the working dir
537 # generate a pseudo-manifest for the working dir
538 mf2 = mfmatches(self.dirstate.parents()[0])
538 mf2 = mfmatches(self.dirstate.parents()[0])
539 for f in lookup + modified + added:
539 for f in lookup + modified + added:
540 mf2[f] = ""
540 mf2[f] = ""
541 for f in removed:
541 for f in removed:
542 if f in mf2:
542 if f in mf2:
543 del mf2[f]
543 del mf2[f]
544 else:
544 else:
545 # we are comparing two revisions
545 # we are comparing two revisions
546 deleted, unknown = [], []
546 deleted, unknown = [], []
547 mf2 = mfmatches(node2)
547 mf2 = mfmatches(node2)
548
548
549 if node1:
549 if node1:
550 # flush lists from dirstate before comparing manifests
550 # flush lists from dirstate before comparing manifests
551 modified, added = [], []
551 modified, added = [], []
552
552
553 for fn in mf2:
553 for fn in mf2:
554 if mf1.has_key(fn):
554 if mf1.has_key(fn):
555 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
555 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
556 modified.append(fn)
556 modified.append(fn)
557 del mf1[fn]
557 del mf1[fn]
558 else:
558 else:
559 added.append(fn)
559 added.append(fn)
560
560
561 removed = mf1.keys()
561 removed = mf1.keys()
562
562
563 # sort and return results:
563 # sort and return results:
564 for l in modified, added, removed, deleted, unknown:
564 for l in modified, added, removed, deleted, unknown:
565 l.sort()
565 l.sort()
566 return (modified, added, removed, deleted, unknown)
566 return (modified, added, removed, deleted, unknown)
567
567
568 def add(self, list, wlock=None):
568 def add(self, list, wlock=None):
569 if not wlock:
569 if not wlock:
570 wlock = self.wlock()
570 wlock = self.wlock()
571 for f in list:
571 for f in list:
572 p = self.wjoin(f)
572 p = self.wjoin(f)
573 if not os.path.exists(p):
573 if not os.path.exists(p):
574 self.ui.warn(_("%s does not exist!\n") % f)
574 self.ui.warn(_("%s does not exist!\n") % f)
575 elif not os.path.isfile(p):
575 elif not os.path.isfile(p):
576 self.ui.warn(_("%s not added: only files supported currently\n")
576 self.ui.warn(_("%s not added: only files supported currently\n")
577 % f)
577 % f)
578 elif self.dirstate.state(f) in 'an':
578 elif self.dirstate.state(f) in 'an':
579 self.ui.warn(_("%s already tracked!\n") % f)
579 self.ui.warn(_("%s already tracked!\n") % f)
580 else:
580 else:
581 self.dirstate.update([f], "a")
581 self.dirstate.update([f], "a")
582
582
583 def forget(self, list, wlock=None):
583 def forget(self, list, wlock=None):
584 if not wlock:
584 if not wlock:
585 wlock = self.wlock()
585 wlock = self.wlock()
586 for f in list:
586 for f in list:
587 if self.dirstate.state(f) not in 'ai':
587 if self.dirstate.state(f) not in 'ai':
588 self.ui.warn(_("%s not added!\n") % f)
588 self.ui.warn(_("%s not added!\n") % f)
589 else:
589 else:
590 self.dirstate.forget([f])
590 self.dirstate.forget([f])
591
591
592 def remove(self, list, unlink=False, wlock=None):
592 def remove(self, list, unlink=False, wlock=None):
593 if unlink:
593 if unlink:
594 for f in list:
594 for f in list:
595 try:
595 try:
596 util.unlink(self.wjoin(f))
596 util.unlink(self.wjoin(f))
597 except OSError, inst:
597 except OSError, inst:
598 if inst.errno != errno.ENOENT:
598 if inst.errno != errno.ENOENT:
599 raise
599 raise
600 if not wlock:
600 if not wlock:
601 wlock = self.wlock()
601 wlock = self.wlock()
602 for f in list:
602 for f in list:
603 p = self.wjoin(f)
603 p = self.wjoin(f)
604 if os.path.exists(p):
604 if os.path.exists(p):
605 self.ui.warn(_("%s still exists!\n") % f)
605 self.ui.warn(_("%s still exists!\n") % f)
606 elif self.dirstate.state(f) == 'a':
606 elif self.dirstate.state(f) == 'a':
607 self.dirstate.forget([f])
607 self.dirstate.forget([f])
608 elif f not in self.dirstate:
608 elif f not in self.dirstate:
609 self.ui.warn(_("%s not tracked!\n") % f)
609 self.ui.warn(_("%s not tracked!\n") % f)
610 else:
610 else:
611 self.dirstate.update([f], "r")
611 self.dirstate.update([f], "r")
612
612
613 def undelete(self, list, wlock=None):
613 def undelete(self, list, wlock=None):
614 p = self.dirstate.parents()[0]
614 p = self.dirstate.parents()[0]
615 mn = self.changelog.read(p)[0]
615 mn = self.changelog.read(p)[0]
616 mf = self.manifest.readflags(mn)
616 mf = self.manifest.readflags(mn)
617 m = self.manifest.read(mn)
617 m = self.manifest.read(mn)
618 if not wlock:
618 if not wlock:
619 wlock = self.wlock()
619 wlock = self.wlock()
620 for f in list:
620 for f in list:
621 if self.dirstate.state(f) not in "r":
621 if self.dirstate.state(f) not in "r":
622 self.ui.warn("%s not removed!\n" % f)
622 self.ui.warn("%s not removed!\n" % f)
623 else:
623 else:
624 t = self.file(f).read(m[f])
624 t = self.file(f).read(m[f])
625 self.wwrite(f, t)
625 self.wwrite(f, t)
626 util.set_exec(self.wjoin(f), mf[f])
626 util.set_exec(self.wjoin(f), mf[f])
627 self.dirstate.update([f], "n")
627 self.dirstate.update([f], "n")
628
628
629 def copy(self, source, dest, wlock=None):
629 def copy(self, source, dest, wlock=None):
630 p = self.wjoin(dest)
630 p = self.wjoin(dest)
631 if not os.path.exists(p):
631 if not os.path.exists(p):
632 self.ui.warn(_("%s does not exist!\n") % dest)
632 self.ui.warn(_("%s does not exist!\n") % dest)
633 elif not os.path.isfile(p):
633 elif not os.path.isfile(p):
634 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
634 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
635 else:
635 else:
636 if not wlock:
636 if not wlock:
637 wlock = self.wlock()
637 wlock = self.wlock()
638 if self.dirstate.state(dest) == '?':
638 if self.dirstate.state(dest) == '?':
639 self.dirstate.update([dest], "a")
639 self.dirstate.update([dest], "a")
640 self.dirstate.copy(source, dest)
640 self.dirstate.copy(source, dest)
641
641
642 def heads(self, start=None):
642 def heads(self, start=None):
643 heads = self.changelog.heads(start)
643 heads = self.changelog.heads(start)
644 # sort the output in rev descending order
644 # sort the output in rev descending order
645 heads = [(-self.changelog.rev(h), h) for h in heads]
645 heads = [(-self.changelog.rev(h), h) for h in heads]
646 heads.sort()
646 heads.sort()
647 return [n for (r, n) in heads]
647 return [n for (r, n) in heads]
648
648
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head to the list of branch tags visible from it.

        heads defaults to all repository heads; branch, if given, stops
        the parent walk once a node carrying that tag is reached.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]      # work queue of heads still to walk
        chlog = self.changelog
        branches = {}                     # node -> dict of tagged nodes visible from it
        merges = []                       # (second parent, found-so-far) walks to resume
        seenmerge = {}
        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # node collected so far on this walk
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop walking past the requested branch tag
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue the second parent of a merge for a later walk,
                # carrying a snapshot of the nodes found so far
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized transitive closure of the visibility map
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
754
754
755 def branches(self, nodes):
755 def branches(self, nodes):
756 if not nodes:
756 if not nodes:
757 nodes = [self.changelog.tip()]
757 nodes = [self.changelog.tip()]
758 b = []
758 b = []
759 for n in nodes:
759 for n in nodes:
760 t = n
760 t = n
761 while n:
761 while n:
762 p = self.changelog.parents(n)
762 p = self.changelog.parents(n)
763 if p[1] != nullid or p[0] == nullid:
763 if p[1] != nullid or p[0] == nullid:
764 b.append((t, n, p[0], p[1]))
764 b.append((t, n, p[0], p[1]))
765 break
765 break
766 n = p[0]
766 n = p[0]
767 return b
767 return b
768
768
769 def between(self, pairs):
769 def between(self, pairs):
770 r = []
770 r = []
771
771
772 for top, bottom in pairs:
772 for top, bottom in pairs:
773 n, l, i = top, [], 0
773 n, l, i = top, [], 0
774 f = 1
774 f = 1
775
775
776 while n != bottom:
776 while n != bottom:
777 p = self.changelog.parents(n)[0]
777 p = self.changelog.parents(n)[0]
778 if i == f:
778 if i == f:
779 l.append(n)
779 l.append(n)
780 f = f * 2
780 f = f * 2
781 n = p
781 n = p
782 i += 1
782 i += 1
783
783
784 r.append(l)
784 r.append(l)
785
785
786 return r
786 return r
787
787
    def findincoming(self, remote, base=None, heads=None):
        """Discover changesets present in remote but unknown locally.

        base, if given as a dict, is filled in with nodes both sides
        have; heads restricts which remote heads are examined (defaults
        to all of remote's heads).  Returns the list of earliest-unknown
        remote nodes to fetch, or None if remote has nothing new.
        """
        m = self.changelog.nodemap
        search = []       # branch ranges to binary-search later
        fetch = {}        # earliest unknown nodes found so far
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        if not heads:
            heads = remote.heads()

        # split remote heads into known (shared) and unknown ones
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return None

        rep = {}          # nodes already requested from remote
        reqcnt = 0        # number of round trips, for debug output

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid:
                    break
                if n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                if n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # both parents known: the branch root is the
                            # earliest unknown node on this segment
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            base[n[2]] = 1 # latest known
                            continue

                    # queue unknown parents for the next batched request
                    for a in n[2:4]:
                        if a not in rep:
                            r.append(a)
                            rep[a] = 1

                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # ask for branch info in batches of 10 nodes
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        if b[0] in m:
                            self.ui.debug(_("found base node %s\n")
                                          % short(b[0]))
                            base[b[0]] = 1
                        elif b[0] not in seen:
                            unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            # sample nodes between the branch head and its known root
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most one node: p is the boundary
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            self.ui.warn(_("warning: pulling from an unrelated repository!\n"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
909
909
910 def findoutgoing(self, remote, base=None, heads=None):
910 def findoutgoing(self, remote, base=None, heads=None):
911 if base == None:
911 if base == None:
912 base = {}
912 base = {}
913 self.findincoming(remote, base, heads)
913 self.findincoming(remote, base, heads)
914
914
915 self.ui.debug(_("common changesets up to ")
915 self.ui.debug(_("common changesets up to ")
916 + " ".join(map(short, base.keys())) + "\n")
916 + " ".join(map(short, base.keys())) + "\n")
917
917
918 remain = dict.fromkeys(self.changelog.nodemap)
918 remain = dict.fromkeys(self.changelog.nodemap)
919
919
920 # prune everything remote has from the tree
920 # prune everything remote has from the tree
921 del remain[nullid]
921 del remain[nullid]
922 remove = base.keys()
922 remove = base.keys()
923 while remove:
923 while remove:
924 n = remove.pop(0)
924 n = remove.pop(0)
925 if n in remain:
925 if n in remain:
926 del remain[n]
926 del remain[n]
927 for p in self.changelog.parents(n):
927 for p in self.changelog.parents(n):
928 remove.append(p)
928 remove.append(p)
929
929
930 # find every node whose parents have been pruned
930 # find every node whose parents have been pruned
931 subset = []
931 subset = []
932 for n in remain:
932 for n in remain:
933 p1, p2 = self.changelog.parents(n)
933 p1, p2 = self.changelog.parents(n)
934 if p1 not in remain and p2 not in remain:
934 if p1 not in remain and p2 not in remain:
935 subset.append(n)
935 subset.append(n)
936
936
937 # this is the set of all roots we have to push
937 # this is the set of all roots we have to push
938 return subset
938 return subset
939
939
940 def pull(self, remote, heads=None):
940 def pull(self, remote, heads=None):
941 l = self.lock()
941 l = self.lock()
942
942
943 # if we have an empty repo, fetch everything
943 # if we have an empty repo, fetch everything
944 if self.changelog.tip() == nullid:
944 if self.changelog.tip() == nullid:
945 self.ui.status(_("requesting all changes\n"))
945 self.ui.status(_("requesting all changes\n"))
946 fetch = [nullid]
946 fetch = [nullid]
947 else:
947 else:
948 fetch = self.findincoming(remote)
948 fetch = self.findincoming(remote)
949
949
950 if not fetch:
950 if not fetch:
951 self.ui.status(_("no changes found\n"))
951 self.ui.status(_("no changes found\n"))
952 return 1
952 return 1
953
953
954 if heads is None:
954 if heads is None:
955 cg = remote.changegroup(fetch, 'pull')
955 cg = remote.changegroup(fetch, 'pull')
956 else:
956 else:
957 cg = remote.changegroupsubset(fetch, heads, 'pull')
957 cg = remote.changegroupsubset(fetch, heads, 'pull')
958 return self.addchangegroup(cg)
958 return self.addchangegroup(cg)
959
959
960 def push(self, remote, force=False, revs=None):
960 def push(self, remote, force=False, revs=None):
961 lock = remote.lock()
961 lock = remote.lock()
962
962
963 base = {}
963 base = {}
964 heads = remote.heads()
964 heads = remote.heads()
965 inc = self.findincoming(remote, base, heads)
965 inc = self.findincoming(remote, base, heads)
966 if not force and inc:
966 if not force and inc:
967 self.ui.warn(_("abort: unsynced remote changes!\n"))
967 self.ui.warn(_("abort: unsynced remote changes!\n"))
968 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
968 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
969 return 1
969 return 1
970
970
971 update = self.findoutgoing(remote, base)
971 update = self.findoutgoing(remote, base)
972 if revs is not None:
972 if revs is not None:
973 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
973 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
974 else:
974 else:
975 bases, heads = update, self.changelog.heads()
975 bases, heads = update, self.changelog.heads()
976
976
977 if not bases:
977 if not bases:
978 self.ui.status(_("no changes found\n"))
978 self.ui.status(_("no changes found\n"))
979 return 1
979 return 1
980 elif not force:
980 elif not force:
981 if len(bases) < len(heads):
981 if len(bases) < len(heads):
982 self.ui.warn(_("abort: push creates new remote branches!\n"))
982 self.ui.warn(_("abort: push creates new remote branches!\n"))
983 self.ui.status(_("(did you forget to merge?"
983 self.ui.status(_("(did you forget to merge?"
984 " use push -f to force)\n"))
984 " use push -f to force)\n"))
985 return 1
985 return 1
986
986
987 if revs is None:
987 if revs is None:
988 cg = self.changegroup(update, 'push')
988 cg = self.changegroup(update, 'push')
989 else:
989 else:
990 cg = self.changegroupsubset(update, revs, 'push')
990 cg = self.changegroupsubset(update, revs, 'push')
991 return remote.addchangegroup(cg)
991 return remote.addchangegroup(cg)
992
992
993 def changegroupsubset(self, bases, heads, source):
993 def changegroupsubset(self, bases, heads, source):
994 """This function generates a changegroup consisting of all the nodes
994 """This function generates a changegroup consisting of all the nodes
995 that are descendents of any of the bases, and ancestors of any of
995 that are descendents of any of the bases, and ancestors of any of
996 the heads.
996 the heads.
997
997
998 It is fairly complex as determining which filenodes and which
998 It is fairly complex as determining which filenodes and which
999 manifest nodes need to be included for the changeset to be complete
999 manifest nodes need to be included for the changeset to be complete
1000 is non-trivial.
1000 is non-trivial.
1001
1001
1002 Another wrinkle is doing the reverse, figuring out which changeset in
1002 Another wrinkle is doing the reverse, figuring out which changeset in
1003 the changegroup a particular filenode or manifestnode belongs to."""
1003 the changegroup a particular filenode or manifestnode belongs to."""
1004
1004
1005 self.hook('preoutgoing', throw=True, source=source)
1005 self.hook('preoutgoing', throw=True, source=source)
1006
1006
1007 # Set up some initial variables
1007 # Set up some initial variables
1008 # Make it easy to refer to self.changelog
1008 # Make it easy to refer to self.changelog
1009 cl = self.changelog
1009 cl = self.changelog
1010 # msng is short for missing - compute the list of changesets in this
1010 # msng is short for missing - compute the list of changesets in this
1011 # changegroup.
1011 # changegroup.
1012 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1012 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1013 # Some bases may turn out to be superfluous, and some heads may be
1013 # Some bases may turn out to be superfluous, and some heads may be
1014 # too. nodesbetween will return the minimal set of bases and heads
1014 # too. nodesbetween will return the minimal set of bases and heads
1015 # necessary to re-create the changegroup.
1015 # necessary to re-create the changegroup.
1016
1016
1017 # Known heads are the list of heads that it is assumed the recipient
1017 # Known heads are the list of heads that it is assumed the recipient
1018 # of this changegroup will know about.
1018 # of this changegroup will know about.
1019 knownheads = {}
1019 knownheads = {}
1020 # We assume that all parents of bases are known heads.
1020 # We assume that all parents of bases are known heads.
1021 for n in bases:
1021 for n in bases:
1022 for p in cl.parents(n):
1022 for p in cl.parents(n):
1023 if p != nullid:
1023 if p != nullid:
1024 knownheads[p] = 1
1024 knownheads[p] = 1
1025 knownheads = knownheads.keys()
1025 knownheads = knownheads.keys()
1026 if knownheads:
1026 if knownheads:
1027 # Now that we know what heads are known, we can compute which
1027 # Now that we know what heads are known, we can compute which
1028 # changesets are known. The recipient must know about all
1028 # changesets are known. The recipient must know about all
1029 # changesets required to reach the known heads from the null
1029 # changesets required to reach the known heads from the null
1030 # changeset.
1030 # changeset.
1031 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1031 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1032 junk = None
1032 junk = None
1033 # Transform the list into an ersatz set.
1033 # Transform the list into an ersatz set.
1034 has_cl_set = dict.fromkeys(has_cl_set)
1034 has_cl_set = dict.fromkeys(has_cl_set)
1035 else:
1035 else:
1036 # If there were no known heads, the recipient cannot be assumed to
1036 # If there were no known heads, the recipient cannot be assumed to
1037 # know about any changesets.
1037 # know about any changesets.
1038 has_cl_set = {}
1038 has_cl_set = {}
1039
1039
1040 # Make it easy to refer to self.manifest
1040 # Make it easy to refer to self.manifest
1041 mnfst = self.manifest
1041 mnfst = self.manifest
1042 # We don't know which manifests are missing yet
1042 # We don't know which manifests are missing yet
1043 msng_mnfst_set = {}
1043 msng_mnfst_set = {}
1044 # Nor do we know which filenodes are missing.
1044 # Nor do we know which filenodes are missing.
1045 msng_filenode_set = {}
1045 msng_filenode_set = {}
1046
1046
1047 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1047 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1048 junk = None
1048 junk = None
1049
1049
1050 # A changeset always belongs to itself, so the changenode lookup
1050 # A changeset always belongs to itself, so the changenode lookup
1051 # function for a changenode is identity.
1051 # function for a changenode is identity.
1052 def identity(x):
1052 def identity(x):
1053 return x
1053 return x
1054
1054
1055 # A function generating function. Sets up an environment for the
1055 # A function generating function. Sets up an environment for the
1056 # inner function.
1056 # inner function.
1057 def cmp_by_rev_func(revlog):
1057 def cmp_by_rev_func(revlog):
1058 # Compare two nodes by their revision number in the environment's
1058 # Compare two nodes by their revision number in the environment's
1059 # revision history. Since the revision number both represents the
1059 # revision history. Since the revision number both represents the
1060 # most efficient order to read the nodes in, and represents a
1060 # most efficient order to read the nodes in, and represents a
1061 # topological sorting of the nodes, this function is often useful.
1061 # topological sorting of the nodes, this function is often useful.
1062 def cmp_by_rev(a, b):
1062 def cmp_by_rev(a, b):
1063 return cmp(revlog.rev(a), revlog.rev(b))
1063 return cmp(revlog.rev(a), revlog.rev(b))
1064 return cmp_by_rev
1064 return cmp_by_rev
1065
1065
1066 # If we determine that a particular file or manifest node must be a
1066 # If we determine that a particular file or manifest node must be a
1067 # node that the recipient of the changegroup will already have, we can
1067 # node that the recipient of the changegroup will already have, we can
1068 # also assume the recipient will have all the parents. This function
1068 # also assume the recipient will have all the parents. This function
1069 # prunes them from the set of missing nodes.
1069 # prunes them from the set of missing nodes.
1070 def prune_parents(revlog, hasset, msngset):
1070 def prune_parents(revlog, hasset, msngset):
1071 haslst = hasset.keys()
1071 haslst = hasset.keys()
1072 haslst.sort(cmp_by_rev_func(revlog))
1072 haslst.sort(cmp_by_rev_func(revlog))
1073 for node in haslst:
1073 for node in haslst:
1074 parentlst = [p for p in revlog.parents(node) if p != nullid]
1074 parentlst = [p for p in revlog.parents(node) if p != nullid]
1075 while parentlst:
1075 while parentlst:
1076 n = parentlst.pop()
1076 n = parentlst.pop()
1077 if n not in hasset:
1077 if n not in hasset:
1078 hasset[n] = 1
1078 hasset[n] = 1
1079 p = [p for p in revlog.parents(n) if p != nullid]
1079 p = [p for p in revlog.parents(n) if p != nullid]
1080 parentlst.extend(p)
1080 parentlst.extend(p)
1081 for n in hasset:
1081 for n in hasset:
1082 msngset.pop(n, None)
1082 msngset.pop(n, None)
1083
1083
1084 # This is a function generating function used to set up an environment
1084 # This is a function generating function used to set up an environment
1085 # for the inner function to execute in.
1085 # for the inner function to execute in.
1086 def manifest_and_file_collector(changedfileset):
1086 def manifest_and_file_collector(changedfileset):
1087 # This is an information gathering function that gathers
1087 # This is an information gathering function that gathers
1088 # information from each changeset node that goes out as part of
1088 # information from each changeset node that goes out as part of
1089 # the changegroup. The information gathered is a list of which
1089 # the changegroup. The information gathered is a list of which
1090 # manifest nodes are potentially required (the recipient may
1090 # manifest nodes are potentially required (the recipient may
1091 # already have them) and total list of all files which were
1091 # already have them) and total list of all files which were
1092 # changed in any changeset in the changegroup.
1092 # changed in any changeset in the changegroup.
1093 #
1093 #
1094 # We also remember the first changenode we saw any manifest
1094 # We also remember the first changenode we saw any manifest
1095 # referenced by so we can later determine which changenode 'owns'
1095 # referenced by so we can later determine which changenode 'owns'
1096 # the manifest.
1096 # the manifest.
1097 def collect_manifests_and_files(clnode):
1097 def collect_manifests_and_files(clnode):
1098 c = cl.read(clnode)
1098 c = cl.read(clnode)
1099 for f in c[3]:
1099 for f in c[3]:
1100 # This is to make sure we only have one instance of each
1100 # This is to make sure we only have one instance of each
1101 # filename string for each filename.
1101 # filename string for each filename.
1102 changedfileset.setdefault(f, f)
1102 changedfileset.setdefault(f, f)
1103 msng_mnfst_set.setdefault(c[0], clnode)
1103 msng_mnfst_set.setdefault(c[0], clnode)
1104 return collect_manifests_and_files
1104 return collect_manifests_and_files
1105
1105
        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            # Drop from msng_mnfst_set everything the recipient is now
            # known to have (including ancestors of known nodes).
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1119
1119
        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            # msng_mnfst_set maps manifest node -> owning changelog node.
            return msng_mnfst_set[mnfstnode]
1124
1124
        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # Mutable cell so the closure can track the revision it expects
            # to see next (Python 2 has no 'nonlocal').
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line;
                        # manifest lines are "<name>\0<hex-node>".
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        # Intern via the changed-files dict so identical
                        # names share one string object (or get None).
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes
1174
1174
        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            # Remove known filenodes (and their ancestors) from msngset.
            prune_parents(filerevlog, hasset, msngset)
1188
1188
        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            # Bind the per-file map once so the inner lookup is cheap.
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link
1197
1197
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    # File groups are preceded by the file name, length-framed
                    # (the 4 accounts for the length word itself).
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)
1258
1258
1259 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1259 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1260
1260
1261 return util.chunkbuffer(gengroup())
1261 return util.chunkbuffer(gengroup())
1262
1262
1263 def changegroup(self, basenodes, source):
1263 def changegroup(self, basenodes, source):
1264 """Generate a changegroup of all nodes that we have that a recipient
1264 """Generate a changegroup of all nodes that we have that a recipient
1265 doesn't.
1265 doesn't.
1266
1266
1267 This is much easier than the previous function as we can assume that
1267 This is much easier than the previous function as we can assume that
1268 the recipient has any changenode we aren't sending them."""
1268 the recipient has any changenode we aren't sending them."""
1269
1269
1270 self.hook('preoutgoing', throw=True, source=source)
1270 self.hook('preoutgoing', throw=True, source=source)
1271
1271
1272 cl = self.changelog
1272 cl = self.changelog
1273 nodes = cl.nodesbetween(basenodes, None)[0]
1273 nodes = cl.nodesbetween(basenodes, None)[0]
1274 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1274 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1275
1275
1276 def identity(x):
1276 def identity(x):
1277 return x
1277 return x
1278
1278
1279 def gennodelst(revlog):
1279 def gennodelst(revlog):
1280 for r in xrange(0, revlog.count()):
1280 for r in xrange(0, revlog.count()):
1281 n = revlog.node(r)
1281 n = revlog.node(r)
1282 if revlog.linkrev(n) in revset:
1282 if revlog.linkrev(n) in revset:
1283 yield n
1283 yield n
1284
1284
1285 def changed_file_collector(changedfileset):
1285 def changed_file_collector(changedfileset):
1286 def collect_changed_files(clnode):
1286 def collect_changed_files(clnode):
1287 c = cl.read(clnode)
1287 c = cl.read(clnode)
1288 for fname in c[3]:
1288 for fname in c[3]:
1289 changedfileset[fname] = 1
1289 changedfileset[fname] = 1
1290 return collect_changed_files
1290 return collect_changed_files
1291
1291
1292 def lookuprevlink_func(revlog):
1292 def lookuprevlink_func(revlog):
1293 def lookuprevlink(n):
1293 def lookuprevlink(n):
1294 return cl.node(revlog.linkrev(n))
1294 return cl.node(revlog.linkrev(n))
1295 return lookuprevlink
1295 return lookuprevlink
1296
1296
1297 def gengroup():
1297 def gengroup():
1298 # construct a list of all changed files
1298 # construct a list of all changed files
1299 changedfiles = {}
1299 changedfiles = {}
1300
1300
1301 for chnk in cl.group(nodes, identity,
1301 for chnk in cl.group(nodes, identity,
1302 changed_file_collector(changedfiles)):
1302 changed_file_collector(changedfiles)):
1303 yield chnk
1303 yield chnk
1304 changedfiles = changedfiles.keys()
1304 changedfiles = changedfiles.keys()
1305 changedfiles.sort()
1305 changedfiles.sort()
1306
1306
1307 mnfst = self.manifest
1307 mnfst = self.manifest
1308 nodeiter = gennodelst(mnfst)
1308 nodeiter = gennodelst(mnfst)
1309 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1309 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1310 yield chnk
1310 yield chnk
1311
1311
1312 for fname in changedfiles:
1312 for fname in changedfiles:
1313 filerevlog = self.file(fname)
1313 filerevlog = self.file(fname)
1314 nodeiter = gennodelst(filerevlog)
1314 nodeiter = gennodelst(filerevlog)
1315 nodeiter = list(nodeiter)
1315 nodeiter = list(nodeiter)
1316 if nodeiter:
1316 if nodeiter:
1317 yield struct.pack(">l", len(fname) + 4) + fname
1317 yield struct.pack(">l", len(fname) + 4) + fname
1318 lookup = lookuprevlink_func(filerevlog)
1318 lookup = lookuprevlink_func(filerevlog)
1319 for chnk in filerevlog.group(nodeiter, lookup):
1319 for chnk in filerevlog.group(nodeiter, lookup):
1320 yield chnk
1320 yield chnk
1321
1321
1322 yield struct.pack(">l", 0)
1322 yield struct.pack(">l", 0)
1323 self.hook('outgoing', node=hex(nodes[0]), source=source)
1323 self.hook('outgoing', node=hex(nodes[0]), source=source)
1324
1324
1325 return util.chunkbuffer(gengroup())
1325 return util.chunkbuffer(gengroup())
1326
1326
    def addchangegroup(self, source):
        """Read a changegroup stream from *source* (a file-like object)
        and apply it to this repository inside one transaction.

        Fires the prechangegroup / pretxnchangegroup / changegroup /
        incoming hooks at the appropriate points."""

        def getchunk():
            # One chunk = 4-byte big-endian length (including the length
            # word itself) followed by the payload; "" means end of group.
            d = source.read(4)
            if not d:
                return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4:
                return ""
            d = source.read(l - 4)
            if len(d) < l - 4:
                raise repo.RepoError(_("premature EOF reading chunk"
                                       " (got %d bytes, expected %d)")
                                     % (len(d), l - 4))
            return d

        def getgroup():
            # Yield chunks until the zero-length end-of-group marker.
            while 1:
                c = getchunk()
                if not c:
                    break
                yield c

        def csmap(x):
            # Link-map for changelog chunks: report progress and return the
            # revision number the incoming changeset will receive.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        def revmap(x):
            # Link-map for manifest/file chunks: changelog node -> rev.
            return self.changelog.rev(x)

        if not source:
            return

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        # Head count before the pull, to report "(+N heads)" afterwards.
        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            # Empty group: pretend the new tip equals the old one so the
            # changeset count below comes out zero.
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        # NOTE(review): mm and mo appear unused after this point — the
        # addgroup call is executed for its side effects.
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # Each file group is preceded by a chunk holding the file name;
            # an empty chunk ends the stream.
            f = getchunk()
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        # Give hooks a chance to veto before the transaction commits;
        # cor+1 is the first newly-added changeset.
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            # One 'incoming' hook per new changeset (range is empty when
            # nothing was added).
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))
1414
1414
1415 def update(self, node, allow=False, force=False, choose=None,
1415 def update(self, node, allow=False, force=False, choose=None,
1416 moddirstate=True, forcemerge=False, wlock=None):
1416 moddirstate=True, forcemerge=False, wlock=None):
1417 pl = self.dirstate.parents()
1417 pl = self.dirstate.parents()
1418 if not force and pl[1] != nullid:
1418 if not force and pl[1] != nullid:
1419 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1419 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1420 return 1
1420 return 1
1421
1421
1422 err = False
1422 err = False
1423
1423
1424 p1, p2 = pl[0], node
1424 p1, p2 = pl[0], node
1425 pa = self.changelog.ancestor(p1, p2)
1425 pa = self.changelog.ancestor(p1, p2)
1426 m1n = self.changelog.read(p1)[0]
1426 m1n = self.changelog.read(p1)[0]
1427 m2n = self.changelog.read(p2)[0]
1427 m2n = self.changelog.read(p2)[0]
1428 man = self.manifest.ancestor(m1n, m2n)
1428 man = self.manifest.ancestor(m1n, m2n)
1429 m1 = self.manifest.read(m1n)
1429 m1 = self.manifest.read(m1n)
1430 mf1 = self.manifest.readflags(m1n)
1430 mf1 = self.manifest.readflags(m1n)
1431 m2 = self.manifest.read(m2n).copy()
1431 m2 = self.manifest.read(m2n).copy()
1432 mf2 = self.manifest.readflags(m2n)
1432 mf2 = self.manifest.readflags(m2n)
1433 ma = self.manifest.read(man)
1433 ma = self.manifest.read(man)
1434 mfa = self.manifest.readflags(man)
1434 mfa = self.manifest.readflags(man)
1435
1435
1436 modified, added, removed, deleted, unknown = self.changes()
1436 modified, added, removed, deleted, unknown = self.changes()
1437
1437
1438 # is this a jump, or a merge? i.e. is there a linear path
1438 # is this a jump, or a merge? i.e. is there a linear path
1439 # from p1 to p2?
1439 # from p1 to p2?
1440 linear_path = (pa == p1 or pa == p2)
1440 linear_path = (pa == p1 or pa == p2)
1441
1441
1442 if allow and linear_path:
1442 if allow and linear_path:
1443 raise util.Abort(_("there is nothing to merge, "
1443 raise util.Abort(_("there is nothing to merge, "
1444 "just use 'hg update'"))
1444 "just use 'hg update'"))
1445 if allow and not forcemerge:
1445 if allow and not forcemerge:
1446 if modified or added or removed:
1446 if modified or added or removed:
1447 raise util.Abort(_("outstanding uncommited changes"))
1447 raise util.Abort(_("outstanding uncommited changes"))
1448 if not forcemerge and not force:
1448 if not forcemerge and not force:
1449 for f in unknown:
1449 for f in unknown:
1450 if f in m2:
1450 if f in m2:
1451 t1 = self.wread(f)
1451 t1 = self.wread(f)
1452 t2 = self.file(f).read(m2[f])
1452 t2 = self.file(f).read(m2[f])
1453 if cmp(t1, t2) != 0:
1453 if cmp(t1, t2) != 0:
1454 raise util.Abort(_("'%s' already exists in the working"
1454 raise util.Abort(_("'%s' already exists in the working"
1455 " dir and differs from remote") % f)
1455 " dir and differs from remote") % f)
1456
1456
1457 # resolve the manifest to determine which files
1457 # resolve the manifest to determine which files
1458 # we care about merging
1458 # we care about merging
1459 self.ui.note(_("resolving manifests\n"))
1459 self.ui.note(_("resolving manifests\n"))
1460 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1460 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1461 (force, allow, moddirstate, linear_path))
1461 (force, allow, moddirstate, linear_path))
1462 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1462 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1463 (short(man), short(m1n), short(m2n)))
1463 (short(man), short(m1n), short(m2n)))
1464
1464
1465 merge = {}
1465 merge = {}
1466 get = {}
1466 get = {}
1467 remove = []
1467 remove = []
1468
1468
1469 # construct a working dir manifest
1469 # construct a working dir manifest
1470 mw = m1.copy()
1470 mw = m1.copy()
1471 mfw = mf1.copy()
1471 mfw = mf1.copy()
1472 umap = dict.fromkeys(unknown)
1472 umap = dict.fromkeys(unknown)
1473
1473
1474 for f in added + modified + unknown:
1474 for f in added + modified + unknown:
1475 mw[f] = ""
1475 mw[f] = ""
1476 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1476 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1477
1477
1478 if moddirstate and not wlock:
1478 if moddirstate and not wlock:
1479 wlock = self.wlock()
1479 wlock = self.wlock()
1480
1480
1481 for f in deleted + removed:
1481 for f in deleted + removed:
1482 if f in mw:
1482 if f in mw:
1483 del mw[f]
1483 del mw[f]
1484
1484
1485 # If we're jumping between revisions (as opposed to merging),
1485 # If we're jumping between revisions (as opposed to merging),
1486 # and if neither the working directory nor the target rev has
1486 # and if neither the working directory nor the target rev has
1487 # the file, then we need to remove it from the dirstate, to
1487 # the file, then we need to remove it from the dirstate, to
1488 # prevent the dirstate from listing the file when it is no
1488 # prevent the dirstate from listing the file when it is no
1489 # longer in the manifest.
1489 # longer in the manifest.
1490 if moddirstate and linear_path and f not in m2:
1490 if moddirstate and linear_path and f not in m2:
1491 self.dirstate.forget((f,))
1491 self.dirstate.forget((f,))
1492
1492
1493 # Compare manifests
1493 # Compare manifests
1494 for f, n in mw.iteritems():
1494 for f, n in mw.iteritems():
1495 if choose and not choose(f):
1495 if choose and not choose(f):
1496 continue
1496 continue
1497 if f in m2:
1497 if f in m2:
1498 s = 0
1498 s = 0
1499
1499
1500 # is the wfile new since m1, and match m2?
1500 # is the wfile new since m1, and match m2?
1501 if f not in m1:
1501 if f not in m1:
1502 t1 = self.wread(f)
1502 t1 = self.wread(f)
1503 t2 = self.file(f).read(m2[f])
1503 t2 = self.file(f).read(m2[f])
1504 if cmp(t1, t2) == 0:
1504 if cmp(t1, t2) == 0:
1505 n = m2[f]
1505 n = m2[f]
1506 del t1, t2
1506 del t1, t2
1507
1507
1508 # are files different?
1508 # are files different?
1509 if n != m2[f]:
1509 if n != m2[f]:
1510 a = ma.get(f, nullid)
1510 a = ma.get(f, nullid)
1511 # are both different from the ancestor?
1511 # are both different from the ancestor?
1512 if n != a and m2[f] != a:
1512 if n != a and m2[f] != a:
1513 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1513 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1514 # merge executable bits
1514 # merge executable bits
1515 # "if we changed or they changed, change in merge"
1515 # "if we changed or they changed, change in merge"
1516 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1516 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1517 mode = ((a^b) | (a^c)) ^ a
1517 mode = ((a^b) | (a^c)) ^ a
1518 merge[f] = (m1.get(f, nullid), m2[f], mode)
1518 merge[f] = (m1.get(f, nullid), m2[f], mode)
1519 s = 1
1519 s = 1
1520 # are we clobbering?
1520 # are we clobbering?
1521 # is remote's version newer?
1521 # is remote's version newer?
1522 # or are we going back in time?
1522 # or are we going back in time?
1523 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1523 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1524 self.ui.debug(_(" remote %s is newer, get\n") % f)
1524 self.ui.debug(_(" remote %s is newer, get\n") % f)
1525 get[f] = m2[f]
1525 get[f] = m2[f]
1526 s = 1
1526 s = 1
1527 elif f in umap:
1527 elif f in umap:
1528 # this unknown file is the same as the checkout
1528 # this unknown file is the same as the checkout
1529 get[f] = m2[f]
1529 get[f] = m2[f]
1530
1530
1531 if not s and mfw[f] != mf2[f]:
1531 if not s and mfw[f] != mf2[f]:
1532 if force:
1532 if force:
1533 self.ui.debug(_(" updating permissions for %s\n") % f)
1533 self.ui.debug(_(" updating permissions for %s\n") % f)
1534 util.set_exec(self.wjoin(f), mf2[f])
1534 util.set_exec(self.wjoin(f), mf2[f])
1535 else:
1535 else:
1536 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1536 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1537 mode = ((a^b) | (a^c)) ^ a
1537 mode = ((a^b) | (a^c)) ^ a
1538 if mode != b:
1538 if mode != b:
1539 self.ui.debug(_(" updating permissions for %s\n")
1539 self.ui.debug(_(" updating permissions for %s\n")
1540 % f)
1540 % f)
1541 util.set_exec(self.wjoin(f), mode)
1541 util.set_exec(self.wjoin(f), mode)
1542 del m2[f]
1542 del m2[f]
1543 elif f in ma:
1543 elif f in ma:
1544 if n != ma[f]:
1544 if n != ma[f]:
1545 r = _("d")
1545 r = _("d")
1546 if not force and (linear_path or allow):
1546 if not force and (linear_path or allow):
1547 r = self.ui.prompt(
1547 r = self.ui.prompt(
1548 (_(" local changed %s which remote deleted\n") % f) +
1548 (_(" local changed %s which remote deleted\n") % f) +
1549 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1549 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1550 if r == _("d"):
1550 if r == _("d"):
1551 remove.append(f)
1551 remove.append(f)
1552 else:
1552 else:
1553 self.ui.debug(_("other deleted %s\n") % f)
1553 self.ui.debug(_("other deleted %s\n") % f)
1554 remove.append(f) # other deleted it
1554 remove.append(f) # other deleted it
1555 else:
1555 else:
1556 # file is created on branch or in working directory
1556 # file is created on branch or in working directory
1557 if force and f not in umap:
1557 if force and f not in umap:
1558 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1558 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1559 remove.append(f)
1559 remove.append(f)
1560 elif n == m1.get(f, nullid): # same as parent
1560 elif n == m1.get(f, nullid): # same as parent
1561 if p2 == pa: # going backwards?
1561 if p2 == pa: # going backwards?
1562 self.ui.debug(_("remote deleted %s\n") % f)
1562 self.ui.debug(_("remote deleted %s\n") % f)
1563 remove.append(f)
1563 remove.append(f)
1564 else:
1564 else:
1565 self.ui.debug(_("local modified %s, keeping\n") % f)
1565 self.ui.debug(_("local modified %s, keeping\n") % f)
1566 else:
1566 else:
1567 self.ui.debug(_("working dir created %s, keeping\n") % f)
1567 self.ui.debug(_("working dir created %s, keeping\n") % f)
1568
1568
1569 for f, n in m2.iteritems():
1569 for f, n in m2.iteritems():
1570 if choose and not choose(f):
1570 if choose and not choose(f):
1571 continue
1571 continue
1572 if f[0] == "/":
1572 if f[0] == "/":
1573 continue
1573 continue
1574 if f in ma and n != ma[f]:
1574 if f in ma and n != ma[f]:
1575 r = _("k")
1575 r = _("k")
1576 if not force and (linear_path or allow):
1576 if not force and (linear_path or allow):
1577 r = self.ui.prompt(
1577 r = self.ui.prompt(
1578 (_("remote changed %s which local deleted\n") % f) +
1578 (_("remote changed %s which local deleted\n") % f) +
1579 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1579 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1580 if r == _("k"):
1580 if r == _("k"):
1581 get[f] = n
1581 get[f] = n
1582 elif f not in ma:
1582 elif f not in ma:
1583 self.ui.debug(_("remote created %s\n") % f)
1583 self.ui.debug(_("remote created %s\n") % f)
1584 get[f] = n
1584 get[f] = n
1585 else:
1585 else:
1586 if force or p2 == pa: # going backwards?
1586 if force or p2 == pa: # going backwards?
1587 self.ui.debug(_("local deleted %s, recreating\n") % f)
1587 self.ui.debug(_("local deleted %s, recreating\n") % f)
1588 get[f] = n
1588 get[f] = n
1589 else:
1589 else:
1590 self.ui.debug(_("local deleted %s\n") % f)
1590 self.ui.debug(_("local deleted %s\n") % f)
1591
1591
1592 del mw, m1, m2, ma
1592 del mw, m1, m2, ma
1593
1593
1594 if force:
1594 if force:
1595 for f in merge:
1595 for f in merge:
1596 get[f] = merge[f][1]
1596 get[f] = merge[f][1]
1597 merge = {}
1597 merge = {}
1598
1598
1599 if linear_path or force:
1599 if linear_path or force:
1600 # we don't need to do any magic, just jump to the new rev
1600 # we don't need to do any magic, just jump to the new rev
1601 branch_merge = False
1601 branch_merge = False
1602 p1, p2 = p2, nullid
1602 p1, p2 = p2, nullid
1603 else:
1603 else:
1604 if not allow:
1604 if not allow:
1605 self.ui.status(_("this update spans a branch"
1605 self.ui.status(_("this update spans a branch"
1606 " affecting the following files:\n"))
1606 " affecting the following files:\n"))
1607 fl = merge.keys() + get.keys()
1607 fl = merge.keys() + get.keys()
1608 fl.sort()
1608 fl.sort()
1609 for f in fl:
1609 for f in fl:
1610 cf = ""
1610 cf = ""
1611 if f in merge:
1611 if f in merge:
1612 cf = _(" (resolve)")
1612 cf = _(" (resolve)")
1613 self.ui.status(" %s%s\n" % (f, cf))
1613 self.ui.status(" %s%s\n" % (f, cf))
1614 self.ui.warn(_("aborting update spanning branches!\n"))
1614 self.ui.warn(_("aborting update spanning branches!\n"))
1615 self.ui.status(_("(use update -m to merge across branches"
1615 self.ui.status(_("(use update -m to merge across branches"
1616 " or -C to lose changes)\n"))
1616 " or -C to lose changes)\n"))
1617 return 1
1617 return 1
1618 branch_merge = True
1618 branch_merge = True
1619
1619
1620 # get the files we don't need to change
1620 # get the files we don't need to change
1621 files = get.keys()
1621 files = get.keys()
1622 files.sort()
1622 files.sort()
1623 for f in files:
1623 for f in files:
1624 if f[0] == "/":
1624 if f[0] == "/":
1625 continue
1625 continue
1626 self.ui.note(_("getting %s\n") % f)
1626 self.ui.note(_("getting %s\n") % f)
1627 t = self.file(f).read(get[f])
1627 t = self.file(f).read(get[f])
1628 self.wwrite(f, t)
1628 self.wwrite(f, t)
1629 util.set_exec(self.wjoin(f), mf2[f])
1629 util.set_exec(self.wjoin(f), mf2[f])
1630 if moddirstate:
1630 if moddirstate:
1631 if branch_merge:
1631 if branch_merge:
1632 self.dirstate.update([f], 'n', st_mtime=-1)
1632 self.dirstate.update([f], 'n', st_mtime=-1)
1633 else:
1633 else:
1634 self.dirstate.update([f], 'n')
1634 self.dirstate.update([f], 'n')
1635
1635
1636 # merge the tricky bits
1636 # merge the tricky bits
1637 files = merge.keys()
1637 files = merge.keys()
1638 files.sort()
1638 files.sort()
1639 for f in files:
1639 for f in files:
1640 self.ui.status(_("merging %s\n") % f)
1640 self.ui.status(_("merging %s\n") % f)
1641 my, other, flag = merge[f]
1641 my, other, flag = merge[f]
1642 ret = self.merge3(f, my, other)
1642 ret = self.merge3(f, my, other)
1643 if ret:
1643 if ret:
1644 err = True
1644 err = True
1645 util.set_exec(self.wjoin(f), flag)
1645 util.set_exec(self.wjoin(f), flag)
1646 if moddirstate:
1646 if moddirstate:
1647 if branch_merge:
1647 if branch_merge:
1648 # We've done a branch merge, mark this file as merged
1648 # We've done a branch merge, mark this file as merged
1649 # so that we properly record the merger later
1649 # so that we properly record the merger later
1650 self.dirstate.update([f], 'm')
1650 self.dirstate.update([f], 'm')
1651 else:
1651 else:
1652 # We've update-merged a locally modified file, so
1652 # We've update-merged a locally modified file, so
1653 # we set the dirstate to emulate a normal checkout
1653 # we set the dirstate to emulate a normal checkout
1654 # of that file some time in the past. Thus our
1654 # of that file some time in the past. Thus our
1655 # merge will appear as a normal local file
1655 # merge will appear as a normal local file
1656 # modification.
1656 # modification.
1657 f_len = len(self.file(f).read(other))
1657 f_len = len(self.file(f).read(other))
1658 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1658 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1659
1659
1660 remove.sort()
1660 remove.sort()
1661 for f in remove:
1661 for f in remove:
1662 self.ui.note(_("removing %s\n") % f)
1662 self.ui.note(_("removing %s\n") % f)
1663 util.audit_path(f)
1663 util.audit_path(f)
1664 try:
1664 try:
1665 util.unlink(self.wjoin(f))
1665 util.unlink(self.wjoin(f))
1666 except OSError, inst:
1666 except OSError, inst:
1667 if inst.errno != errno.ENOENT:
1667 if inst.errno != errno.ENOENT:
1668 self.ui.warn(_("update failed to remove %s: %s!\n") %
1668 self.ui.warn(_("update failed to remove %s: %s!\n") %
1669 (f, inst.strerror))
1669 (f, inst.strerror))
1670 if moddirstate:
1670 if moddirstate:
1671 if branch_merge:
1671 if branch_merge:
1672 self.dirstate.update(remove, 'r')
1672 self.dirstate.update(remove, 'r')
1673 else:
1673 else:
1674 self.dirstate.forget(remove)
1674 self.dirstate.forget(remove)
1675
1675
1676 if moddirstate:
1676 if moddirstate:
1677 self.dirstate.setparents(p1, p2)
1677 self.dirstate.setparents(p1, p2)
1678 return err
1678 return err
1679
1679
1680 def merge3(self, fn, my, other):
1680 def merge3(self, fn, my, other):
1681 """perform a 3-way merge in the working directory"""
1681 """perform a 3-way merge in the working directory"""
1682
1682
1683 def temp(prefix, node):
1683 def temp(prefix, node):
1684 pre = "%s~%s." % (os.path.basename(fn), prefix)
1684 pre = "%s~%s." % (os.path.basename(fn), prefix)
1685 (fd, name) = tempfile.mkstemp("", pre)
1685 (fd, name) = tempfile.mkstemp("", pre)
1686 f = os.fdopen(fd, "wb")
1686 f = os.fdopen(fd, "wb")
1687 self.wwrite(fn, fl.read(node), f)
1687 self.wwrite(fn, fl.read(node), f)
1688 f.close()
1688 f.close()
1689 return name
1689 return name
1690
1690
1691 fl = self.file(fn)
1691 fl = self.file(fn)
1692 base = fl.ancestor(my, other)
1692 base = fl.ancestor(my, other)
1693 a = self.wjoin(fn)
1693 a = self.wjoin(fn)
1694 b = temp("base", base)
1694 b = temp("base", base)
1695 c = temp("other", other)
1695 c = temp("other", other)
1696
1696
1697 self.ui.note(_("resolving %s\n") % fn)
1697 self.ui.note(_("resolving %s\n") % fn)
1698 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1698 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1699 (fn, short(my), short(other), short(base)))
1699 (fn, short(my), short(other), short(base)))
1700
1700
1701 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1701 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1702 or "hgmerge")
1702 or "hgmerge")
1703 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1703 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1704 if r:
1704 if r:
1705 self.ui.warn(_("merging %s failed!\n") % fn)
1705 self.ui.warn(_("merging %s failed!\n") % fn)
1706
1706
1707 os.unlink(b)
1707 os.unlink(b)
1708 os.unlink(c)
1708 os.unlink(c)
1709 return r
1709 return r
1710
1710
1711 def verify(self):
1711 def verify(self):
1712 filelinkrevs = {}
1712 filelinkrevs = {}
1713 filenodes = {}
1713 filenodes = {}
1714 changesets = revisions = files = 0
1714 changesets = revisions = files = 0
1715 errors = [0]
1715 errors = [0]
1716 neededmanifests = {}
1716 neededmanifests = {}
1717
1717
1718 def err(msg):
1718 def err(msg):
1719 self.ui.warn(msg + "\n")
1719 self.ui.warn(msg + "\n")
1720 errors[0] += 1
1720 errors[0] += 1
1721
1721
1722 def checksize(obj, name):
1722 def checksize(obj, name):
1723 d = obj.checksize()
1723 d = obj.checksize()
1724 if d[0]:
1724 if d[0]:
1725 err(_("%s data length off by %d bytes") % (name, d[0]))
1725 err(_("%s data length off by %d bytes") % (name, d[0]))
1726 if d[1]:
1726 if d[1]:
1727 err(_("%s index contains %d extra bytes") % (name, d[1]))
1727 err(_("%s index contains %d extra bytes") % (name, d[1]))
1728
1728
1729 seen = {}
1729 seen = {}
1730 self.ui.status(_("checking changesets\n"))
1730 self.ui.status(_("checking changesets\n"))
1731 checksize(self.changelog, "changelog")
1731 checksize(self.changelog, "changelog")
1732
1732
1733 for i in range(self.changelog.count()):
1733 for i in range(self.changelog.count()):
1734 changesets += 1
1734 changesets += 1
1735 n = self.changelog.node(i)
1735 n = self.changelog.node(i)
1736 l = self.changelog.linkrev(n)
1736 l = self.changelog.linkrev(n)
1737 if l != i:
1737 if l != i:
1738 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1738 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1739 if n in seen:
1739 if n in seen:
1740 err(_("duplicate changeset at revision %d") % i)
1740 err(_("duplicate changeset at revision %d") % i)
1741 seen[n] = 1
1741 seen[n] = 1
1742
1742
1743 for p in self.changelog.parents(n):
1743 for p in self.changelog.parents(n):
1744 if p not in self.changelog.nodemap:
1744 if p not in self.changelog.nodemap:
1745 err(_("changeset %s has unknown parent %s") %
1745 err(_("changeset %s has unknown parent %s") %
1746 (short(n), short(p)))
1746 (short(n), short(p)))
1747 try:
1747 try:
1748 changes = self.changelog.read(n)
1748 changes = self.changelog.read(n)
1749 except KeyboardInterrupt:
1749 except KeyboardInterrupt:
1750 self.ui.warn(_("interrupted"))
1750 self.ui.warn(_("interrupted"))
1751 raise
1751 raise
1752 except Exception, inst:
1752 except Exception, inst:
1753 err(_("unpacking changeset %s: %s") % (short(n), inst))
1753 err(_("unpacking changeset %s: %s") % (short(n), inst))
1754 continue
1754 continue
1755
1755
1756 neededmanifests[changes[0]] = n
1756 neededmanifests[changes[0]] = n
1757
1757
1758 for f in changes[3]:
1758 for f in changes[3]:
1759 filelinkrevs.setdefault(f, []).append(i)
1759 filelinkrevs.setdefault(f, []).append(i)
1760
1760
1761 seen = {}
1761 seen = {}
1762 self.ui.status(_("checking manifests\n"))
1762 self.ui.status(_("checking manifests\n"))
1763 checksize(self.manifest, "manifest")
1763 checksize(self.manifest, "manifest")
1764
1764
1765 for i in range(self.manifest.count()):
1765 for i in range(self.manifest.count()):
1766 n = self.manifest.node(i)
1766 n = self.manifest.node(i)
1767 l = self.manifest.linkrev(n)
1767 l = self.manifest.linkrev(n)
1768
1768
1769 if l < 0 or l >= self.changelog.count():
1769 if l < 0 or l >= self.changelog.count():
1770 err(_("bad manifest link (%d) at revision %d") % (l, i))
1770 err(_("bad manifest link (%d) at revision %d") % (l, i))
1771
1771
1772 if n in neededmanifests:
1772 if n in neededmanifests:
1773 del neededmanifests[n]
1773 del neededmanifests[n]
1774
1774
1775 if n in seen:
1775 if n in seen:
1776 err(_("duplicate manifest at revision %d") % i)
1776 err(_("duplicate manifest at revision %d") % i)
1777
1777
1778 seen[n] = 1
1778 seen[n] = 1
1779
1779
1780 for p in self.manifest.parents(n):
1780 for p in self.manifest.parents(n):
1781 if p not in self.manifest.nodemap:
1781 if p not in self.manifest.nodemap:
1782 err(_("manifest %s has unknown parent %s") %
1782 err(_("manifest %s has unknown parent %s") %
1783 (short(n), short(p)))
1783 (short(n), short(p)))
1784
1784
1785 try:
1785 try:
1786 delta = mdiff.patchtext(self.manifest.delta(n))
1786 delta = mdiff.patchtext(self.manifest.delta(n))
1787 except KeyboardInterrupt:
1787 except KeyboardInterrupt:
1788 self.ui.warn(_("interrupted"))
1788 self.ui.warn(_("interrupted"))
1789 raise
1789 raise
1790 except Exception, inst:
1790 except Exception, inst:
1791 err(_("unpacking manifest %s: %s") % (short(n), inst))
1791 err(_("unpacking manifest %s: %s") % (short(n), inst))
1792 continue
1792 continue
1793
1793
1794 try:
1794 try:
1795 ff = [ l.split('\0') for l in delta.splitlines() ]
1795 ff = [ l.split('\0') for l in delta.splitlines() ]
1796 for f, fn in ff:
1796 for f, fn in ff:
1797 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1797 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1798 except (ValueError, TypeError), inst:
1798 except (ValueError, TypeError), inst:
1799 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1799 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1800
1800
1801 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1801 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1802
1802
1803 for m, c in neededmanifests.items():
1803 for m, c in neededmanifests.items():
1804 err(_("Changeset %s refers to unknown manifest %s") %
1804 err(_("Changeset %s refers to unknown manifest %s") %
1805 (short(m), short(c)))
1805 (short(m), short(c)))
1806 del neededmanifests
1806 del neededmanifests
1807
1807
1808 for f in filenodes:
1808 for f in filenodes:
1809 if f not in filelinkrevs:
1809 if f not in filelinkrevs:
1810 err(_("file %s in manifest but not in changesets") % f)
1810 err(_("file %s in manifest but not in changesets") % f)
1811
1811
1812 for f in filelinkrevs:
1812 for f in filelinkrevs:
1813 if f not in filenodes:
1813 if f not in filenodes:
1814 err(_("file %s in changeset but not in manifest") % f)
1814 err(_("file %s in changeset but not in manifest") % f)
1815
1815
1816 self.ui.status(_("checking files\n"))
1816 self.ui.status(_("checking files\n"))
1817 ff = filenodes.keys()
1817 ff = filenodes.keys()
1818 ff.sort()
1818 ff.sort()
1819 for f in ff:
1819 for f in ff:
1820 if f == "/dev/null":
1820 if f == "/dev/null":
1821 continue
1821 continue
1822 files += 1
1822 files += 1
1823 if not f:
1823 if not f:
1824 err(_("file without name in manifest %s") % short(n))
1824 err(_("file without name in manifest %s") % short(n))
1825 continue
1825 continue
1826 fl = self.file(f)
1826 fl = self.file(f)
1827 checksize(fl, f)
1827 checksize(fl, f)
1828
1828
1829 nodes = {nullid: 1}
1829 nodes = {nullid: 1}
1830 seen = {}
1830 seen = {}
1831 for i in range(fl.count()):
1831 for i in range(fl.count()):
1832 revisions += 1
1832 revisions += 1
1833 n = fl.node(i)
1833 n = fl.node(i)
1834
1834
1835 if n in seen:
1835 if n in seen:
1836 err(_("%s: duplicate revision %d") % (f, i))
1836 err(_("%s: duplicate revision %d") % (f, i))
1837 if n not in filenodes[f]:
1837 if n not in filenodes[f]:
1838 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1838 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1839 else:
1839 else:
1840 del filenodes[f][n]
1840 del filenodes[f][n]
1841
1841
1842 flr = fl.linkrev(n)
1842 flr = fl.linkrev(n)
1843 if flr not in filelinkrevs.get(f, []):
1843 if flr not in filelinkrevs.get(f, []):
1844 err(_("%s:%s points to unexpected changeset %d")
1844 err(_("%s:%s points to unexpected changeset %d")
1845 % (f, short(n), flr))
1845 % (f, short(n), flr))
1846 else:
1846 else:
1847 filelinkrevs[f].remove(flr)
1847 filelinkrevs[f].remove(flr)
1848
1848
1849 # verify contents
1849 # verify contents
1850 try:
1850 try:
1851 t = fl.read(n)
1851 t = fl.read(n)
1852 except KeyboardInterrupt:
1852 except KeyboardInterrupt:
1853 self.ui.warn(_("interrupted"))
1853 self.ui.warn(_("interrupted"))
1854 raise
1854 raise
1855 except Exception, inst:
1855 except Exception, inst:
1856 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1856 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1857
1857
1858 # verify parents
1858 # verify parents
1859 (p1, p2) = fl.parents(n)
1859 (p1, p2) = fl.parents(n)
1860 if p1 not in nodes:
1860 if p1 not in nodes:
1861 err(_("file %s:%s unknown parent 1 %s") %
1861 err(_("file %s:%s unknown parent 1 %s") %
1862 (f, short(n), short(p1)))
1862 (f, short(n), short(p1)))
1863 if p2 not in nodes:
1863 if p2 not in nodes:
1864 err(_("file %s:%s unknown parent 2 %s") %
1864 err(_("file %s:%s unknown parent 2 %s") %
1865 (f, short(n), short(p1)))
1865 (f, short(n), short(p1)))
1866 nodes[n] = 1
1866 nodes[n] = 1
1867
1867
1868 # cross-check
1868 # cross-check
1869 for node in filenodes[f]:
1869 for node in filenodes[f]:
1870 err(_("node %s in manifests not in %s") % (hex(node), f))
1870 err(_("node %s in manifests not in %s") % (hex(node), f))
1871
1871
1872 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1872 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1873 (files, changesets, revisions))
1873 (files, changesets, revisions))
1874
1874
1875 if errors[0]:
1875 if errors[0]:
1876 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1876 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1877 return 1
1877 return 1
1878
1878
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that finalizes a committed transaction.

    The returned function renames the journal files under *base* to
    their "undo" names, so the transaction can later be rolled back.
    Returning a plain closure (rather than a bound method) avoids a
    circular reference that would keep destructors from running.
    """
    def rename_journal():
        for old, new in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(base, old), os.path.join(base, new))
    return rename_journal
1886 return a
1887
1887
@@ -1,219 +1,221
1 # ui.py - user interface bits for mercurial
1 # ui.py - user interface bits for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import ConfigParser
8 import ConfigParser
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 demandload(globals(), "os re socket sys util")
11 demandload(globals(), "os re socket sys util")
12
12
13 class ui(object):
13 class ui(object):
14 def __init__(self, verbose=False, debug=False, quiet=False,
14 def __init__(self, verbose=False, debug=False, quiet=False,
15 interactive=True, parentui=None):
15 interactive=True, parentui=None):
16 self.overlay = {}
16 self.overlay = {}
17 if parentui is None:
17 if parentui is None:
18 # this is the parent of all ui children
18 # this is the parent of all ui children
19 self.parentui = None
19 self.parentui = None
20 self.cdata = ConfigParser.SafeConfigParser()
20 self.cdata = ConfigParser.SafeConfigParser()
21 self.readconfig(util.rcpath)
21 self.readconfig(util.rcpath)
22
22
23 self.quiet = self.configbool("ui", "quiet")
23 self.quiet = self.configbool("ui", "quiet")
24 self.verbose = self.configbool("ui", "verbose")
24 self.verbose = self.configbool("ui", "verbose")
25 self.debugflag = self.configbool("ui", "debug")
25 self.debugflag = self.configbool("ui", "debug")
26 self.interactive = self.configbool("ui", "interactive", True)
26 self.interactive = self.configbool("ui", "interactive", True)
27
27
28 self.updateopts(verbose, debug, quiet, interactive)
28 self.updateopts(verbose, debug, quiet, interactive)
29 self.diffcache = None
29 self.diffcache = None
30 else:
30 else:
31 # parentui may point to an ui object which is already a child
31 # parentui may point to an ui object which is already a child
32 self.parentui = parentui.parentui or parentui
32 self.parentui = parentui.parentui or parentui
33 parent_cdata = self.parentui.cdata
33 parent_cdata = self.parentui.cdata
34 self.cdata = ConfigParser.SafeConfigParser(parent_cdata.defaults())
34 self.cdata = ConfigParser.SafeConfigParser(parent_cdata.defaults())
35 # make interpolation work
35 # make interpolation work
36 for section in parent_cdata.sections():
36 for section in parent_cdata.sections():
37 self.cdata.add_section(section)
37 self.cdata.add_section(section)
38 for name, value in parent_cdata.items(section, raw=True):
38 for name, value in parent_cdata.items(section, raw=True):
39 self.cdata.set(section, name, value)
39 self.cdata.set(section, name, value)
40
40
41 def __getattr__(self, key):
41 def __getattr__(self, key):
42 return getattr(self.parentui, key)
42 return getattr(self.parentui, key)
43
43
44 def updateopts(self, verbose=False, debug=False, quiet=False,
44 def updateopts(self, verbose=False, debug=False, quiet=False,
45 interactive=True):
45 interactive=True):
46 self.quiet = (self.quiet or quiet) and not verbose and not debug
46 self.quiet = (self.quiet or quiet) and not verbose and not debug
47 self.verbose = (self.verbose or verbose) or debug
47 self.verbose = (self.verbose or verbose) or debug
48 self.debugflag = (self.debugflag or debug)
48 self.debugflag = (self.debugflag or debug)
49 self.interactive = (self.interactive and interactive)
49 self.interactive = (self.interactive and interactive)
50
50
51 def readconfig(self, fn):
51 def readconfig(self, fn):
52 if isinstance(fn, basestring):
52 if isinstance(fn, basestring):
53 fn = [fn]
53 fn = [fn]
54 for f in fn:
54 for f in fn:
55 try:
55 try:
56 self.cdata.read(f)
56 self.cdata.read(f)
57 except ConfigParser.ParsingError, inst:
57 except ConfigParser.ParsingError, inst:
58 raise util.Abort(_("Failed to parse %s\n%s") % (f, inst))
58 raise util.Abort(_("Failed to parse %s\n%s") % (f, inst))
59
59
60 def setconfig(self, section, name, val):
60 def setconfig(self, section, name, val):
61 self.overlay[(section, name)] = val
61 self.overlay[(section, name)] = val
62
62
63 def config(self, section, name, default=None):
63 def config(self, section, name, default=None):
64 if self.overlay.has_key((section, name)):
64 if self.overlay.has_key((section, name)):
65 return self.overlay[(section, name)]
65 return self.overlay[(section, name)]
66 if self.cdata.has_option(section, name):
66 if self.cdata.has_option(section, name):
67 try:
67 try:
68 return self.cdata.get(section, name)
68 return self.cdata.get(section, name)
69 except ConfigParser.InterpolationError, inst:
69 except ConfigParser.InterpolationError, inst:
70 raise util.Abort(_("Error in configuration:\n%s") % inst)
70 raise util.Abort(_("Error in configuration:\n%s") % inst)
71 if self.parentui is None:
71 if self.parentui is None:
72 return default
72 return default
73 else:
73 else:
74 return self.parentui.config(section, name, default)
74 return self.parentui.config(section, name, default)
75
75
76 def configbool(self, section, name, default=False):
76 def configbool(self, section, name, default=False):
77 if self.overlay.has_key((section, name)):
77 if self.overlay.has_key((section, name)):
78 return self.overlay[(section, name)]
78 return self.overlay[(section, name)]
79 if self.cdata.has_option(section, name):
79 if self.cdata.has_option(section, name):
80 try:
80 try:
81 return self.cdata.getboolean(section, name)
81 return self.cdata.getboolean(section, name)
82 except ConfigParser.InterpolationError, inst:
82 except ConfigParser.InterpolationError, inst:
83 raise util.Abort(_("Error in configuration:\n%s") % inst)
83 raise util.Abort(_("Error in configuration:\n%s") % inst)
84 if self.parentui is None:
84 if self.parentui is None:
85 return default
85 return default
86 else:
86 else:
87 return self.parentui.configbool(section, name, default)
87 return self.parentui.configbool(section, name, default)
88
88
89 def configitems(self, section):
89 def configitems(self, section):
90 items = {}
90 items = {}
91 if self.parentui is not None:
91 if self.parentui is not None:
92 items = dict(self.parentui.configitems(section))
92 items = dict(self.parentui.configitems(section))
93 if self.cdata.has_section(section):
93 if self.cdata.has_section(section):
94 try:
94 try:
95 items.update(dict(self.cdata.items(section)))
95 items.update(dict(self.cdata.items(section)))
96 except ConfigParser.InterpolationError, inst:
96 except ConfigParser.InterpolationError, inst:
97 raise util.Abort(_("Error in configuration:\n%s") % inst)
97 raise util.Abort(_("Error in configuration:\n%s") % inst)
98 x = items.items()
98 x = items.items()
99 x.sort()
99 x.sort()
100 return x
100 return x
101
101
102 def walkconfig(self, seen=None):
102 def walkconfig(self, seen=None):
103 if seen is None:
103 if seen is None:
104 seen = {}
104 seen = {}
105 for (section, name), value in self.overlay.iteritems():
105 for (section, name), value in self.overlay.iteritems():
106 yield section, name, value
106 yield section, name, value
107 seen[section, name] = 1
107 seen[section, name] = 1
108 for section in self.cdata.sections():
108 for section in self.cdata.sections():
109 for name, value in self.cdata.items(section):
109 for name, value in self.cdata.items(section):
110 if (section, name) in seen: continue
110 if (section, name) in seen: continue
111 yield section, name, value.replace('\n', '\\n')
111 yield section, name, value.replace('\n', '\\n')
112 seen[section, name] = 1
112 seen[section, name] = 1
113 if self.parentui is not None:
113 if self.parentui is not None:
114 for parent in self.parentui.walkconfig(seen):
114 for parent in self.parentui.walkconfig(seen):
115 yield parent
115 yield parent
116
116
117 def extensions(self):
117 def extensions(self):
118 return self.configitems("extensions")
118 return self.configitems("extensions")
119
119
120 def diffopts(self):
120 def diffopts(self):
121 if self.diffcache:
121 if self.diffcache:
122 return self.diffcache
122 return self.diffcache
123 ret = { 'showfunc' : True, 'ignorews' : False}
123 ret = { 'showfunc' : True, 'ignorews' : False}
124 for x in self.configitems("diff"):
124 for x in self.configitems("diff"):
125 k = x[0].lower()
125 k = x[0].lower()
126 v = x[1]
126 v = x[1]
127 if v:
127 if v:
128 v = v.lower()
128 v = v.lower()
129 if v == 'true':
129 if v == 'true':
130 value = True
130 value = True
131 else:
131 else:
132 value = False
132 value = False
133 ret[k] = value
133 ret[k] = value
134 self.diffcache = ret
134 self.diffcache = ret
135 return ret
135 return ret
136
136
137 def username(self):
137 def username(self):
138 return (os.environ.get("HGUSER") or
138 return (os.environ.get("HGUSER") or
139 self.config("ui", "username") or
139 self.config("ui", "username") or
140 os.environ.get("EMAIL") or
140 os.environ.get("EMAIL") or
141 (os.environ.get("LOGNAME",
141 (os.environ.get("LOGNAME",
142 os.environ.get("USERNAME", "unknown"))
142 os.environ.get("USERNAME", "unknown"))
143 + '@' + socket.getfqdn()))
143 + '@' + socket.getfqdn()))
144
144
145 def shortuser(self, user):
145 def shortuser(self, user):
146 """Return a short representation of a user name or email address."""
146 """Return a short representation of a user name or email address."""
147 if not self.verbose:
147 if not self.verbose:
148 f = user.find('@')
148 f = user.find('@')
149 if f >= 0:
149 if f >= 0:
150 user = user[:f]
150 user = user[:f]
151 f = user.find('<')
151 f = user.find('<')
152 if f >= 0:
152 if f >= 0:
153 user = user[f+1:]
153 user = user[f+1:]
154 return user
154 return user
155
155
156 def expandpath(self, loc, root=""):
156 def expandpath(self, loc, root=""):
157 paths = {}
157 paths = {}
158 for name, path in self.configitems("paths"):
158 for name, path in self.configitems("paths"):
159 m = path.find("://")
159 m = path.find("://")
160 if m == -1:
160 if m == -1:
161 path = os.path.join(root, path)
161 path = os.path.join(root, path)
162 paths[name] = path
162 paths[name] = path
163
163
164 return paths.get(loc, loc)
164 return paths.get(loc, loc)
165
165
166 def write(self, *args):
166 def write(self, *args):
167 for a in args:
167 for a in args:
168 sys.stdout.write(str(a))
168 sys.stdout.write(str(a))
169
169
170 def write_err(self, *args):
170 def write_err(self, *args):
171 if not sys.stdout.closed: sys.stdout.flush()
171 if not sys.stdout.closed: sys.stdout.flush()
172 for a in args:
172 for a in args:
173 sys.stderr.write(str(a))
173 sys.stderr.write(str(a))
174
174
175 def flush(self):
175 def flush(self):
176 try:
176 try:
177 sys.stdout.flush()
177 sys.stdout.flush()
178 finally:
178 finally:
179 sys.stderr.flush()
179 sys.stderr.flush()
180
180
181 def readline(self):
181 def readline(self):
182 return sys.stdin.readline()[:-1]
182 return sys.stdin.readline()[:-1]
183 def prompt(self, msg, pat, default="y"):
183 def prompt(self, msg, pat, default="y"):
184 if not self.interactive: return default
184 if not self.interactive: return default
185 while 1:
185 while 1:
186 self.write(msg, " ")
186 self.write(msg, " ")
187 r = self.readline()
187 r = self.readline()
188 if re.match(pat, r):
188 if re.match(pat, r):
189 return r
189 return r
190 else:
190 else:
191 self.write(_("unrecognized response\n"))
191 self.write(_("unrecognized response\n"))
192 def status(self, *msg):
192 def status(self, *msg):
193 if not self.quiet: self.write(*msg)
193 if not self.quiet: self.write(*msg)
194 def warn(self, *msg):
194 def warn(self, *msg):
195 self.write_err(*msg)
195 self.write_err(*msg)
196 def note(self, *msg):
196 def note(self, *msg):
197 if self.verbose: self.write(*msg)
197 if self.verbose: self.write(*msg)
198 def debug(self, *msg):
198 def debug(self, *msg):
199 if self.debugflag: self.write(*msg)
199 if self.debugflag: self.write(*msg)
200 def edit(self, text):
200 def edit(self, text):
201 import tempfile
201 import tempfile
202 (fd, name) = tempfile.mkstemp("hg")
202 (fd, name) = tempfile.mkstemp("hg")
203 f = os.fdopen(fd, "w")
203 f = os.fdopen(fd, "w")
204 f.write(text)
204 f.write(text)
205 f.close()
205 f.close()
206
206
207 editor = (os.environ.get("HGEDITOR") or
207 editor = (os.environ.get("HGEDITOR") or
208 self.config("ui", "editor") or
208 self.config("ui", "editor") or
209 os.environ.get("EDITOR", "vi"))
209 os.environ.get("EDITOR", "vi"))
210
210
211 os.environ["HGUSER"] = self.username()
211 os.environ["HGUSER"] = self.username()
212 util.system("%s \"%s\"" % (editor, name), errprefix=_("edit failed"))
212 util.system("%s \"%s\"" % (editor, name),
213 environ={'HGUSER': self.username()},
214 onerr=util.Abort, errprefix=_("edit failed"))
213
215
214 t = open(name).read()
216 t = open(name).read()
215 t = re.sub("(?m)^HG:.*\n", "", t)
217 t = re.sub("(?m)^HG:.*\n", "", t)
216
218
217 os.unlink(name)
219 os.unlink(name)
218
220
219 return t
221 return t
@@ -1,756 +1,760
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8
8
9 This contains helper routines that are independent of the SCM core and hide
9 This contains helper routines that are independent of the SCM core and hide
10 platform-specific details from the core.
10 platform-specific details from the core.
11 """
11 """
12
12
13 import os, errno
13 import os, errno
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import *
15 from demandload import *
16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
17 demandload(globals(), "threading time")
17 demandload(globals(), "threading time")
18
18
19 def pipefilter(s, cmd):
19 def pipefilter(s, cmd):
20 '''filter string S through command CMD, returning its output'''
20 '''filter string S through command CMD, returning its output'''
21 (pout, pin) = popen2.popen2(cmd, -1, 'b')
21 (pout, pin) = popen2.popen2(cmd, -1, 'b')
22 def writer():
22 def writer():
23 pin.write(s)
23 pin.write(s)
24 pin.close()
24 pin.close()
25
25
26 # we should use select instead on UNIX, but this will work on most
26 # we should use select instead on UNIX, but this will work on most
27 # systems, including Windows
27 # systems, including Windows
28 w = threading.Thread(target=writer)
28 w = threading.Thread(target=writer)
29 w.start()
29 w.start()
30 f = pout.read()
30 f = pout.read()
31 pout.close()
31 pout.close()
32 w.join()
32 w.join()
33 return f
33 return f
34
34
35 def tempfilter(s, cmd):
35 def tempfilter(s, cmd):
36 '''filter string S through a pair of temporary files with CMD.
36 '''filter string S through a pair of temporary files with CMD.
37 CMD is used as a template to create the real command to be run,
37 CMD is used as a template to create the real command to be run,
38 with the strings INFILE and OUTFILE replaced by the real names of
38 with the strings INFILE and OUTFILE replaced by the real names of
39 the temporary files generated.'''
39 the temporary files generated.'''
40 inname, outname = None, None
40 inname, outname = None, None
41 try:
41 try:
42 infd, inname = tempfile.mkstemp(prefix='hgfin')
42 infd, inname = tempfile.mkstemp(prefix='hgfin')
43 fp = os.fdopen(infd, 'wb')
43 fp = os.fdopen(infd, 'wb')
44 fp.write(s)
44 fp.write(s)
45 fp.close()
45 fp.close()
46 outfd, outname = tempfile.mkstemp(prefix='hgfout')
46 outfd, outname = tempfile.mkstemp(prefix='hgfout')
47 os.close(outfd)
47 os.close(outfd)
48 cmd = cmd.replace('INFILE', inname)
48 cmd = cmd.replace('INFILE', inname)
49 cmd = cmd.replace('OUTFILE', outname)
49 cmd = cmd.replace('OUTFILE', outname)
50 code = os.system(cmd)
50 code = os.system(cmd)
51 if code: raise Abort(_("command '%s' failed: %s") %
51 if code: raise Abort(_("command '%s' failed: %s") %
52 (cmd, explain_exit(code)))
52 (cmd, explain_exit(code)))
53 return open(outname, 'rb').read()
53 return open(outname, 'rb').read()
54 finally:
54 finally:
55 try:
55 try:
56 if inname: os.unlink(inname)
56 if inname: os.unlink(inname)
57 except: pass
57 except: pass
58 try:
58 try:
59 if outname: os.unlink(outname)
59 if outname: os.unlink(outname)
60 except: pass
60 except: pass
61
61
62 filtertable = {
62 filtertable = {
63 'tempfile:': tempfilter,
63 'tempfile:': tempfilter,
64 'pipe:': pipefilter,
64 'pipe:': pipefilter,
65 }
65 }
66
66
67 def filter(s, cmd):
67 def filter(s, cmd):
68 "filter a string through a command that transforms its input to its output"
68 "filter a string through a command that transforms its input to its output"
69 for name, fn in filtertable.iteritems():
69 for name, fn in filtertable.iteritems():
70 if cmd.startswith(name):
70 if cmd.startswith(name):
71 return fn(s, cmd[len(name):].lstrip())
71 return fn(s, cmd[len(name):].lstrip())
72 return pipefilter(s, cmd)
72 return pipefilter(s, cmd)
73
73
74 def patch(strip, patchname, ui):
74 def patch(strip, patchname, ui):
75 """apply the patch <patchname> to the working directory.
75 """apply the patch <patchname> to the working directory.
76 a list of patched files is returned"""
76 a list of patched files is returned"""
77 fp = os.popen('patch -p%d < "%s"' % (strip, patchname))
77 fp = os.popen('patch -p%d < "%s"' % (strip, patchname))
78 files = {}
78 files = {}
79 for line in fp:
79 for line in fp:
80 line = line.rstrip()
80 line = line.rstrip()
81 ui.status("%s\n" % line)
81 ui.status("%s\n" % line)
82 if line.startswith('patching file '):
82 if line.startswith('patching file '):
83 pf = parse_patch_output(line)
83 pf = parse_patch_output(line)
84 files.setdefault(pf, 1)
84 files.setdefault(pf, 1)
85 code = fp.close()
85 code = fp.close()
86 if code:
86 if code:
87 raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
87 raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
88 return files.keys()
88 return files.keys()
89
89
90 def binary(s):
90 def binary(s):
91 """return true if a string is binary data using diff's heuristic"""
91 """return true if a string is binary data using diff's heuristic"""
92 if s and '\0' in s[:4096]:
92 if s and '\0' in s[:4096]:
93 return True
93 return True
94 return False
94 return False
95
95
96 def unique(g):
96 def unique(g):
97 """return the uniq elements of iterable g"""
97 """return the uniq elements of iterable g"""
98 seen = {}
98 seen = {}
99 for f in g:
99 for f in g:
100 if f not in seen:
100 if f not in seen:
101 seen[f] = 1
101 seen[f] = 1
102 yield f
102 yield f
103
103
104 class Abort(Exception):
104 class Abort(Exception):
105 """Raised if a command needs to print an error and exit."""
105 """Raised if a command needs to print an error and exit."""
106
106
107 def always(fn): return True
107 def always(fn): return True
108 def never(fn): return False
108 def never(fn): return False
109
109
110 def patkind(name, dflt_pat='glob'):
110 def patkind(name, dflt_pat='glob'):
111 """Split a string into an optional pattern kind prefix and the
111 """Split a string into an optional pattern kind prefix and the
112 actual pattern."""
112 actual pattern."""
113 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
113 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
114 if name.startswith(prefix + ':'): return name.split(':', 1)
114 if name.startswith(prefix + ':'): return name.split(':', 1)
115 return dflt_pat, name
115 return dflt_pat, name
116
116
117 def globre(pat, head='^', tail='$'):
117 def globre(pat, head='^', tail='$'):
118 "convert a glob pattern into a regexp"
118 "convert a glob pattern into a regexp"
119 i, n = 0, len(pat)
119 i, n = 0, len(pat)
120 res = ''
120 res = ''
121 group = False
121 group = False
122 def peek(): return i < n and pat[i]
122 def peek(): return i < n and pat[i]
123 while i < n:
123 while i < n:
124 c = pat[i]
124 c = pat[i]
125 i = i+1
125 i = i+1
126 if c == '*':
126 if c == '*':
127 if peek() == '*':
127 if peek() == '*':
128 i += 1
128 i += 1
129 res += '.*'
129 res += '.*'
130 else:
130 else:
131 res += '[^/]*'
131 res += '[^/]*'
132 elif c == '?':
132 elif c == '?':
133 res += '.'
133 res += '.'
134 elif c == '[':
134 elif c == '[':
135 j = i
135 j = i
136 if j < n and pat[j] in '!]':
136 if j < n and pat[j] in '!]':
137 j += 1
137 j += 1
138 while j < n and pat[j] != ']':
138 while j < n and pat[j] != ']':
139 j += 1
139 j += 1
140 if j >= n:
140 if j >= n:
141 res += '\\['
141 res += '\\['
142 else:
142 else:
143 stuff = pat[i:j].replace('\\','\\\\')
143 stuff = pat[i:j].replace('\\','\\\\')
144 i = j + 1
144 i = j + 1
145 if stuff[0] == '!':
145 if stuff[0] == '!':
146 stuff = '^' + stuff[1:]
146 stuff = '^' + stuff[1:]
147 elif stuff[0] == '^':
147 elif stuff[0] == '^':
148 stuff = '\\' + stuff
148 stuff = '\\' + stuff
149 res = '%s[%s]' % (res, stuff)
149 res = '%s[%s]' % (res, stuff)
150 elif c == '{':
150 elif c == '{':
151 group = True
151 group = True
152 res += '(?:'
152 res += '(?:'
153 elif c == '}' and group:
153 elif c == '}' and group:
154 res += ')'
154 res += ')'
155 group = False
155 group = False
156 elif c == ',' and group:
156 elif c == ',' and group:
157 res += '|'
157 res += '|'
158 else:
158 else:
159 res += re.escape(c)
159 res += re.escape(c)
160 return head + res + tail
160 return head + res + tail
161
161
162 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
162 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
163
163
164 def pathto(n1, n2):
164 def pathto(n1, n2):
165 '''return the relative path from one place to another.
165 '''return the relative path from one place to another.
166 this returns a path in the form used by the local filesystem, not hg.'''
166 this returns a path in the form used by the local filesystem, not hg.'''
167 if not n1: return localpath(n2)
167 if not n1: return localpath(n2)
168 a, b = n1.split('/'), n2.split('/')
168 a, b = n1.split('/'), n2.split('/')
169 a.reverse()
169 a.reverse()
170 b.reverse()
170 b.reverse()
171 while a and b and a[-1] == b[-1]:
171 while a and b and a[-1] == b[-1]:
172 a.pop()
172 a.pop()
173 b.pop()
173 b.pop()
174 b.reverse()
174 b.reverse()
175 return os.sep.join((['..'] * len(a)) + b)
175 return os.sep.join((['..'] * len(a)) + b)
176
176
177 def canonpath(root, cwd, myname):
177 def canonpath(root, cwd, myname):
178 """return the canonical path of myname, given cwd and root"""
178 """return the canonical path of myname, given cwd and root"""
179 if root == os.sep:
179 if root == os.sep:
180 rootsep = os.sep
180 rootsep = os.sep
181 else:
181 else:
182 rootsep = root + os.sep
182 rootsep = root + os.sep
183 name = myname
183 name = myname
184 if not name.startswith(os.sep):
184 if not name.startswith(os.sep):
185 name = os.path.join(root, cwd, name)
185 name = os.path.join(root, cwd, name)
186 name = os.path.normpath(name)
186 name = os.path.normpath(name)
187 if name.startswith(rootsep):
187 if name.startswith(rootsep):
188 return pconvert(name[len(rootsep):])
188 return pconvert(name[len(rootsep):])
189 elif name == root:
189 elif name == root:
190 return ''
190 return ''
191 else:
191 else:
192 raise Abort('%s not under root' % myname)
192 raise Abort('%s not under root' % myname)
193
193
194 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
194 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
195 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
195 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
196
196
197 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
197 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
198 if os.name == 'nt':
198 if os.name == 'nt':
199 dflt_pat = 'glob'
199 dflt_pat = 'glob'
200 else:
200 else:
201 dflt_pat = 'relpath'
201 dflt_pat = 'relpath'
202 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
202 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
203
203
204 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
204 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
205 """build a function to match a set of file patterns
205 """build a function to match a set of file patterns
206
206
207 arguments:
207 arguments:
208 canonroot - the canonical root of the tree you're matching against
208 canonroot - the canonical root of the tree you're matching against
209 cwd - the current working directory, if relevant
209 cwd - the current working directory, if relevant
210 names - patterns to find
210 names - patterns to find
211 inc - patterns to include
211 inc - patterns to include
212 exc - patterns to exclude
212 exc - patterns to exclude
213 head - a regex to prepend to patterns to control whether a match is rooted
213 head - a regex to prepend to patterns to control whether a match is rooted
214
214
215 a pattern is one of:
215 a pattern is one of:
216 'glob:<rooted glob>'
216 'glob:<rooted glob>'
217 're:<rooted regexp>'
217 're:<rooted regexp>'
218 'path:<rooted path>'
218 'path:<rooted path>'
219 'relglob:<relative glob>'
219 'relglob:<relative glob>'
220 'relpath:<relative path>'
220 'relpath:<relative path>'
221 'relre:<relative regexp>'
221 'relre:<relative regexp>'
222 '<rooted path or regexp>'
222 '<rooted path or regexp>'
223
223
224 returns:
224 returns:
225 a 3-tuple containing
225 a 3-tuple containing
226 - list of explicit non-pattern names passed in
226 - list of explicit non-pattern names passed in
227 - a bool match(filename) function
227 - a bool match(filename) function
228 - a bool indicating if any patterns were passed in
228 - a bool indicating if any patterns were passed in
229
229
230 todo:
230 todo:
231 make head regex a rooted bool
231 make head regex a rooted bool
232 """
232 """
233
233
234 def contains_glob(name):
234 def contains_glob(name):
235 for c in name:
235 for c in name:
236 if c in _globchars: return True
236 if c in _globchars: return True
237 return False
237 return False
238
238
239 def regex(kind, name, tail):
239 def regex(kind, name, tail):
240 '''convert a pattern into a regular expression'''
240 '''convert a pattern into a regular expression'''
241 if kind == 're':
241 if kind == 're':
242 return name
242 return name
243 elif kind == 'path':
243 elif kind == 'path':
244 return '^' + re.escape(name) + '(?:/|$)'
244 return '^' + re.escape(name) + '(?:/|$)'
245 elif kind == 'relglob':
245 elif kind == 'relglob':
246 return head + globre(name, '(?:|.*/)', tail)
246 return head + globre(name, '(?:|.*/)', tail)
247 elif kind == 'relpath':
247 elif kind == 'relpath':
248 return head + re.escape(name) + tail
248 return head + re.escape(name) + tail
249 elif kind == 'relre':
249 elif kind == 'relre':
250 if name.startswith('^'):
250 if name.startswith('^'):
251 return name
251 return name
252 return '.*' + name
252 return '.*' + name
253 return head + globre(name, '', tail)
253 return head + globre(name, '', tail)
254
254
255 def matchfn(pats, tail):
255 def matchfn(pats, tail):
256 """build a matching function from a set of patterns"""
256 """build a matching function from a set of patterns"""
257 if not pats:
257 if not pats:
258 return
258 return
259 matches = []
259 matches = []
260 for k, p in pats:
260 for k, p in pats:
261 try:
261 try:
262 pat = '(?:%s)' % regex(k, p, tail)
262 pat = '(?:%s)' % regex(k, p, tail)
263 matches.append(re.compile(pat).match)
263 matches.append(re.compile(pat).match)
264 except re.error:
264 except re.error:
265 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
265 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
266 else: raise Abort("invalid pattern (%s): %s" % (k, p))
266 else: raise Abort("invalid pattern (%s): %s" % (k, p))
267
267
268 def buildfn(text):
268 def buildfn(text):
269 for m in matches:
269 for m in matches:
270 r = m(text)
270 r = m(text)
271 if r:
271 if r:
272 return r
272 return r
273
273
274 return buildfn
274 return buildfn
275
275
276 def globprefix(pat):
276 def globprefix(pat):
277 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
277 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
278 root = []
278 root = []
279 for p in pat.split(os.sep):
279 for p in pat.split(os.sep):
280 if contains_glob(p): break
280 if contains_glob(p): break
281 root.append(p)
281 root.append(p)
282 return '/'.join(root)
282 return '/'.join(root)
283
283
284 pats = []
284 pats = []
285 files = []
285 files = []
286 roots = []
286 roots = []
287 for kind, name in [patkind(p, dflt_pat) for p in names]:
287 for kind, name in [patkind(p, dflt_pat) for p in names]:
288 if kind in ('glob', 'relpath'):
288 if kind in ('glob', 'relpath'):
289 name = canonpath(canonroot, cwd, name)
289 name = canonpath(canonroot, cwd, name)
290 if name == '':
290 if name == '':
291 kind, name = 'glob', '**'
291 kind, name = 'glob', '**'
292 if kind in ('glob', 'path', 're'):
292 if kind in ('glob', 'path', 're'):
293 pats.append((kind, name))
293 pats.append((kind, name))
294 if kind == 'glob':
294 if kind == 'glob':
295 root = globprefix(name)
295 root = globprefix(name)
296 if root: roots.append(root)
296 if root: roots.append(root)
297 elif kind == 'relpath':
297 elif kind == 'relpath':
298 files.append((kind, name))
298 files.append((kind, name))
299 roots.append(name)
299 roots.append(name)
300
300
301 patmatch = matchfn(pats, '$') or always
301 patmatch = matchfn(pats, '$') or always
302 filematch = matchfn(files, '(?:/|$)') or always
302 filematch = matchfn(files, '(?:/|$)') or always
303 incmatch = always
303 incmatch = always
304 if inc:
304 if inc:
305 incmatch = matchfn(map(patkind, inc), '(?:/|$)')
305 incmatch = matchfn(map(patkind, inc), '(?:/|$)')
306 excmatch = lambda fn: False
306 excmatch = lambda fn: False
307 if exc:
307 if exc:
308 excmatch = matchfn(map(patkind, exc), '(?:/|$)')
308 excmatch = matchfn(map(patkind, exc), '(?:/|$)')
309
309
310 return (roots,
310 return (roots,
311 lambda fn: (incmatch(fn) and not excmatch(fn) and
311 lambda fn: (incmatch(fn) and not excmatch(fn) and
312 (fn.endswith('/') or
312 (fn.endswith('/') or
313 (not pats and not files) or
313 (not pats and not files) or
314 (pats and patmatch(fn)) or
314 (pats and patmatch(fn)) or
315 (files and filematch(fn)))),
315 (files and filematch(fn)))),
316 (inc or exc or (pats and pats != [('glob', '**')])) and True)
316 (inc or exc or (pats and pats != [('glob', '**')])) and True)
317
317
318 def system(cmd, errprefix=None):
318 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
319 """execute a shell command that must succeed"""
319 '''enhanced shell command execution.
320 rc = os.system(cmd)
320 run with environment maybe modified, maybe in different dir.
321 if rc:
322 errmsg = "%s %s" % (os.path.basename(cmd.split(None, 1)[0]),
323 explain_exit(rc)[0])
324 if errprefix:
325 errmsg = "%s: %s" % (errprefix, errmsg)
326 raise Abort(errmsg)
327
321
328 def esystem(cmd, environ={}, cwd=None):
322 if command fails and onerr is None, return status. if ui object,
329 '''enhanced shell command execution.
323 print error message and return status, else raise onerr object as
330 run with environment maybe modified, maybe in different dir.'''
324 exception.'''
331 oldenv = {}
325 oldenv = {}
332 for k in environ:
326 for k in environ:
333 oldenv[k] = os.environ.get(k)
327 oldenv[k] = os.environ.get(k)
334 if cwd is not None:
328 if cwd is not None:
335 oldcwd = os.getcwd()
329 oldcwd = os.getcwd()
336 try:
330 try:
337 for k, v in environ.iteritems():
331 for k, v in environ.iteritems():
338 os.environ[k] = str(v)
332 os.environ[k] = str(v)
339 if cwd is not None and oldcwd != cwd:
333 if cwd is not None and oldcwd != cwd:
340 os.chdir(cwd)
334 os.chdir(cwd)
341 return os.system(cmd)
335 rc = os.system(cmd)
336 if rc and onerr:
337 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
338 explain_exit(rc)[0])
339 if errprefix:
340 errmsg = '%s: %s' % (errprefix, errmsg)
341 try:
342 onerr.warn(errmsg + '\n')
343 except AttributeError:
344 raise onerr(errmsg)
345 return rc
342 finally:
346 finally:
343 for k, v in oldenv.iteritems():
347 for k, v in oldenv.iteritems():
344 if v is None:
348 if v is None:
345 del os.environ[k]
349 del os.environ[k]
346 else:
350 else:
347 os.environ[k] = v
351 os.environ[k] = v
348 if cwd is not None and oldcwd != cwd:
352 if cwd is not None and oldcwd != cwd:
349 os.chdir(oldcwd)
353 os.chdir(oldcwd)
350
354
351 def rename(src, dst):
355 def rename(src, dst):
352 """forcibly rename a file"""
356 """forcibly rename a file"""
353 try:
357 try:
354 os.rename(src, dst)
358 os.rename(src, dst)
355 except:
359 except:
356 os.unlink(dst)
360 os.unlink(dst)
357 os.rename(src, dst)
361 os.rename(src, dst)
358
362
359 def unlink(f):
363 def unlink(f):
360 """unlink and remove the directory if it is empty"""
364 """unlink and remove the directory if it is empty"""
361 os.unlink(f)
365 os.unlink(f)
362 # try removing directories that might now be empty
366 # try removing directories that might now be empty
363 try: os.removedirs(os.path.dirname(f))
367 try: os.removedirs(os.path.dirname(f))
364 except: pass
368 except: pass
365
369
366 def copyfiles(src, dst, hardlink=None):
370 def copyfiles(src, dst, hardlink=None):
367 """Copy a directory tree using hardlinks if possible"""
371 """Copy a directory tree using hardlinks if possible"""
368
372
369 if hardlink is None:
373 if hardlink is None:
370 hardlink = (os.stat(src).st_dev ==
374 hardlink = (os.stat(src).st_dev ==
371 os.stat(os.path.dirname(dst)).st_dev)
375 os.stat(os.path.dirname(dst)).st_dev)
372
376
373 if os.path.isdir(src):
377 if os.path.isdir(src):
374 os.mkdir(dst)
378 os.mkdir(dst)
375 for name in os.listdir(src):
379 for name in os.listdir(src):
376 srcname = os.path.join(src, name)
380 srcname = os.path.join(src, name)
377 dstname = os.path.join(dst, name)
381 dstname = os.path.join(dst, name)
378 copyfiles(srcname, dstname, hardlink)
382 copyfiles(srcname, dstname, hardlink)
379 else:
383 else:
380 if hardlink:
384 if hardlink:
381 try:
385 try:
382 os_link(src, dst)
386 os_link(src, dst)
383 except:
387 except:
384 hardlink = False
388 hardlink = False
385 shutil.copy(src, dst)
389 shutil.copy(src, dst)
386 else:
390 else:
387 shutil.copy(src, dst)
391 shutil.copy(src, dst)
388
392
389 def audit_path(path):
393 def audit_path(path):
390 """Abort if path contains dangerous components"""
394 """Abort if path contains dangerous components"""
391 parts = os.path.normcase(path).split(os.sep)
395 parts = os.path.normcase(path).split(os.sep)
392 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
396 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
393 or os.pardir in parts):
397 or os.pardir in parts):
394 raise Abort(_("path contains illegal component: %s\n") % path)
398 raise Abort(_("path contains illegal component: %s\n") % path)
395
399
396 def opener(base, audit=True):
400 def opener(base, audit=True):
397 """
401 """
398 return a function that opens files relative to base
402 return a function that opens files relative to base
399
403
400 this function is used to hide the details of COW semantics and
404 this function is used to hide the details of COW semantics and
401 remote file access from higher level code.
405 remote file access from higher level code.
402 """
406 """
403 p = base
407 p = base
404 audit_p = audit
408 audit_p = audit
405
409
406 def mktempcopy(name):
410 def mktempcopy(name):
407 d, fn = os.path.split(name)
411 d, fn = os.path.split(name)
408 fd, temp = tempfile.mkstemp(prefix=fn, dir=d)
412 fd, temp = tempfile.mkstemp(prefix=fn, dir=d)
409 fp = os.fdopen(fd, "wb")
413 fp = os.fdopen(fd, "wb")
410 try:
414 try:
411 fp.write(file(name, "rb").read())
415 fp.write(file(name, "rb").read())
412 except:
416 except:
413 try: os.unlink(temp)
417 try: os.unlink(temp)
414 except: pass
418 except: pass
415 raise
419 raise
416 fp.close()
420 fp.close()
417 st = os.lstat(name)
421 st = os.lstat(name)
418 os.chmod(temp, st.st_mode)
422 os.chmod(temp, st.st_mode)
419 return temp
423 return temp
420
424
421 class atomicfile(file):
425 class atomicfile(file):
422 """the file will only be copied on close"""
426 """the file will only be copied on close"""
423 def __init__(self, name, mode, atomic=False):
427 def __init__(self, name, mode, atomic=False):
424 self.__name = name
428 self.__name = name
425 self.temp = mktempcopy(name)
429 self.temp = mktempcopy(name)
426 file.__init__(self, self.temp, mode)
430 file.__init__(self, self.temp, mode)
427 def close(self):
431 def close(self):
428 if not self.closed:
432 if not self.closed:
429 file.close(self)
433 file.close(self)
430 rename(self.temp, self.__name)
434 rename(self.temp, self.__name)
431 def __del__(self):
435 def __del__(self):
432 self.close()
436 self.close()
433
437
434 def o(path, mode="r", text=False, atomic=False):
438 def o(path, mode="r", text=False, atomic=False):
435 if audit_p:
439 if audit_p:
436 audit_path(path)
440 audit_path(path)
437 f = os.path.join(p, path)
441 f = os.path.join(p, path)
438
442
439 if not text:
443 if not text:
440 mode += "b" # for that other OS
444 mode += "b" # for that other OS
441
445
442 if mode[0] != "r":
446 if mode[0] != "r":
443 try:
447 try:
444 nlink = nlinks(f)
448 nlink = nlinks(f)
445 except OSError:
449 except OSError:
446 d = os.path.dirname(f)
450 d = os.path.dirname(f)
447 if not os.path.isdir(d):
451 if not os.path.isdir(d):
448 os.makedirs(d)
452 os.makedirs(d)
449 else:
453 else:
450 if atomic:
454 if atomic:
451 return atomicfile(f, mode)
455 return atomicfile(f, mode)
452 if nlink > 1:
456 if nlink > 1:
453 rename(mktempcopy(f), f)
457 rename(mktempcopy(f), f)
454 return file(f, mode)
458 return file(f, mode)
455
459
456 return o
460 return o
457
461
458 def _makelock_file(info, pathname):
462 def _makelock_file(info, pathname):
459 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
463 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
460 os.write(ld, info)
464 os.write(ld, info)
461 os.close(ld)
465 os.close(ld)
462
466
463 def _readlock_file(pathname):
467 def _readlock_file(pathname):
464 return file(pathname).read()
468 return file(pathname).read()
465
469
466 def nlinks(pathname):
470 def nlinks(pathname):
467 """Return number of hardlinks for the given file."""
471 """Return number of hardlinks for the given file."""
468 return os.stat(pathname).st_nlink
472 return os.stat(pathname).st_nlink
469
473
470 if hasattr(os, 'link'):
474 if hasattr(os, 'link'):
471 os_link = os.link
475 os_link = os.link
472 else:
476 else:
473 def os_link(src, dst):
477 def os_link(src, dst):
474 raise OSError(0, _("Hardlinks not supported"))
478 raise OSError(0, _("Hardlinks not supported"))
475
479
476 # Platform specific variants
480 # Platform specific variants
477 if os.name == 'nt':
481 if os.name == 'nt':
478 demandload(globals(), "msvcrt")
482 demandload(globals(), "msvcrt")
479 nulldev = 'NUL:'
483 nulldev = 'NUL:'
480
484
481 class winstdout:
485 class winstdout:
482 '''stdout on windows misbehaves if sent through a pipe'''
486 '''stdout on windows misbehaves if sent through a pipe'''
483
487
484 def __init__(self, fp):
488 def __init__(self, fp):
485 self.fp = fp
489 self.fp = fp
486
490
487 def __getattr__(self, key):
491 def __getattr__(self, key):
488 return getattr(self.fp, key)
492 return getattr(self.fp, key)
489
493
490 def close(self):
494 def close(self):
491 try:
495 try:
492 self.fp.close()
496 self.fp.close()
493 except: pass
497 except: pass
494
498
495 def write(self, s):
499 def write(self, s):
496 try:
500 try:
497 return self.fp.write(s)
501 return self.fp.write(s)
498 except IOError, inst:
502 except IOError, inst:
499 if inst.errno != 0: raise
503 if inst.errno != 0: raise
500 self.close()
504 self.close()
501 raise IOError(errno.EPIPE, 'Broken pipe')
505 raise IOError(errno.EPIPE, 'Broken pipe')
502
506
503 sys.stdout = winstdout(sys.stdout)
507 sys.stdout = winstdout(sys.stdout)
504
508
505 try:
509 try:
506 import win32api, win32process
510 import win32api, win32process
507 filename = win32process.GetModuleFileNameEx(win32api.GetCurrentProcess(), 0)
511 filename = win32process.GetModuleFileNameEx(win32api.GetCurrentProcess(), 0)
508 systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
512 systemrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
509
513
510 except ImportError:
514 except ImportError:
511 systemrc = r'c:\mercurial\mercurial.ini'
515 systemrc = r'c:\mercurial\mercurial.ini'
512 pass
516 pass
513
517
514 rcpath = (systemrc,
518 rcpath = (systemrc,
515 os.path.join(os.path.expanduser('~'), 'mercurial.ini'))
519 os.path.join(os.path.expanduser('~'), 'mercurial.ini'))
516
520
517 def parse_patch_output(output_line):
521 def parse_patch_output(output_line):
518 """parses the output produced by patch and returns the file name"""
522 """parses the output produced by patch and returns the file name"""
519 pf = output_line[14:]
523 pf = output_line[14:]
520 if pf[0] == '`':
524 if pf[0] == '`':
521 pf = pf[1:-1] # Remove the quotes
525 pf = pf[1:-1] # Remove the quotes
522 return pf
526 return pf
523
527
524 try: # ActivePython can create hard links using win32file module
528 try: # ActivePython can create hard links using win32file module
525 import win32api, win32con, win32file
529 import win32api, win32con, win32file
526
530
527 def os_link(src, dst): # NB will only succeed on NTFS
531 def os_link(src, dst): # NB will only succeed on NTFS
528 win32file.CreateHardLink(dst, src)
532 win32file.CreateHardLink(dst, src)
529
533
530 def nlinks(pathname):
534 def nlinks(pathname):
531 """Return number of hardlinks for the given file."""
535 """Return number of hardlinks for the given file."""
532 try:
536 try:
533 fh = win32file.CreateFile(pathname,
537 fh = win32file.CreateFile(pathname,
534 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
538 win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
535 None, win32file.OPEN_EXISTING, 0, None)
539 None, win32file.OPEN_EXISTING, 0, None)
536 res = win32file.GetFileInformationByHandle(fh)
540 res = win32file.GetFileInformationByHandle(fh)
537 fh.Close()
541 fh.Close()
538 return res[7]
542 return res[7]
539 except:
543 except:
540 return os.stat(pathname).st_nlink
544 return os.stat(pathname).st_nlink
541
545
542 def testpid(pid):
546 def testpid(pid):
543 '''return False if pid is dead, True if running or not known'''
547 '''return False if pid is dead, True if running or not known'''
544 try:
548 try:
545 win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
549 win32api.OpenProcess(win32con.PROCESS_QUERY_INFORMATION,
546 False, pid)
550 False, pid)
547 except:
551 except:
548 return True
552 return True
549
553
550 except ImportError:
554 except ImportError:
551 def testpid(pid):
555 def testpid(pid):
552 '''return False if pid dead, True if running or not known'''
556 '''return False if pid dead, True if running or not known'''
553 return True
557 return True
554
558
555 def is_exec(f, last):
559 def is_exec(f, last):
556 return last
560 return last
557
561
558 def set_exec(f, mode):
562 def set_exec(f, mode):
559 pass
563 pass
560
564
561 def set_binary(fd):
565 def set_binary(fd):
562 msvcrt.setmode(fd.fileno(), os.O_BINARY)
566 msvcrt.setmode(fd.fileno(), os.O_BINARY)
563
567
564 def pconvert(path):
568 def pconvert(path):
565 return path.replace("\\", "/")
569 return path.replace("\\", "/")
566
570
567 def localpath(path):
571 def localpath(path):
568 return path.replace('/', '\\')
572 return path.replace('/', '\\')
569
573
570 def normpath(path):
574 def normpath(path):
571 return pconvert(os.path.normpath(path))
575 return pconvert(os.path.normpath(path))
572
576
573 makelock = _makelock_file
577 makelock = _makelock_file
574 readlock = _readlock_file
578 readlock = _readlock_file
575
579
576 def explain_exit(code):
580 def explain_exit(code):
577 return _("exited with status %d") % code, code
581 return _("exited with status %d") % code, code
578
582
579 else:
583 else:
580 nulldev = '/dev/null'
584 nulldev = '/dev/null'
581
585
def rcfiles(path):
    """Return the list of hgrc configuration files under path.

    Always includes path/hgrc; additionally includes every *.rc file
    found in path/hgrc.d, if that directory is readable.
    """
    found = [os.path.join(path, 'hgrc')]
    rcdir = os.path.join(path, 'hgrc.d')
    try:
        entries = os.listdir(rcdir)
    except OSError:
        # hgrc.d is optional; silently skip it when absent/unreadable.
        entries = []
    found.extend([os.path.join(rcdir, name)
                  for name in entries if name.endswith(".rc")])
    return found
# Build the ordered list of configuration files read at startup:
# files shipped next to the installed binary, then the system-wide
# /etc/mercurial, then the user's ~/.hgrc (later entries take
# precedence when the ui layer reads them).
rcpath = []
if len(sys.argv) > 0:
    # <exe dir>/../etc/mercurial covers installations outside /usr.
    rcpath.extend(rcfiles(os.path.dirname(sys.argv[0]) + '/../etc/mercurial'))
rcpath.extend(rcfiles('/etc/mercurial'))
rcpath.append(os.path.expanduser('~/.hgrc'))
rcpath = [os.path.normpath(f) for f in rcpath]
596
600
def parse_patch_output(output_line):
    """parses the output produced by patch and returns the file name"""
    # Strip the fixed "patching file " prefix (14 characters).
    name = output_line[14:]
    quoted = name.startswith("'") and name.endswith("'")
    if quoted and name.find(" ") >= 0:
        # patch quotes names that contain spaces; unwrap them.
        name = name[1:-1]
    return name
603
607
def is_exec(f, last):
    """check whether a file is executable"""
    # 0o100 is the owner-execute bit (S_IXUSR); `last` is ignored on
    # POSIX since we can ask the filesystem directly.
    return bool(os.stat(f).st_mode & 0o100)
607
611
def set_exec(f, mode):
    """Set or clear the executable bits of file f.

    When enabling, an execute bit is turned on for every read bit the
    file already has, filtered through the current umask; when
    disabling, all execute bits are cleared.  Does nothing if the file
    is already in the requested state.
    """
    cur = os.stat(f).st_mode
    if bool(cur & 0o100) == mode:
        return
    if mode:
        # Read the process umask without permanently changing it.
        umask = os.umask(0)
        os.umask(umask)
        os.chmod(f, cur | ((cur & 0o444) >> 2) & ~umask)
    else:
        os.chmod(f, cur & 0o666)
620
624
def set_binary(fd):
    """No-op on POSIX: files have no text/binary mode distinction."""
    pass
623
627
def pconvert(path):
    """POSIX paths already use the internal slash form; return unchanged."""
    return path
626
630
def localpath(path):
    """Internal and OS path forms coincide on POSIX; return unchanged."""
    return path
629
633
# os.path.normpath already yields slash-separated paths on POSIX,
# so no pconvert wrapper is needed here.
normpath = os.path.normpath
631
635
def makelock(info, pathname):
    """Create a lock at pathname holding the string info.

    A dangling symlink stores the info atomically.  EEXIST means the
    lock is already held and is propagated to the caller; any other
    symlink failure (e.g. a filesystem without symlink support) falls
    back to a plain lock file.
    """
    try:
        os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        _makelock_file(info, pathname)
640
644
def readlock(pathname):
    """Return the info string stored in the lock at pathname.

    Reads the symlink target when the lock is a symlink; EINVAL means
    the lock is a plain file (the makelock fallback), which is read
    instead.  Other errors propagate.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno != errno.EINVAL:
            raise
        # Not a symlink: must be a plain lock file.
        return _readlock_file(pathname)
649
653
650 def testpid(pid):
654 def testpid(pid):
651 '''return False if pid dead, True if running or not sure'''
655 '''return False if pid dead, True if running or not sure'''
652 try:
656 try:
653 os.kill(pid, 0)
657 os.kill(pid, 0)
654 return True
658 return True
655 except OSError, inst:
659 except OSError, inst:
656 return inst.errno != errno.ESRCH
660 return inst.errno != errno.ESRCH
657
661
def explain_exit(code):
    """return a 2-tuple (desc, code) describing a process's status"""
    # Decode a raw wait() status: normal exit, death by signal, or stop.
    decoders = (
        (os.WIFEXITED, os.WEXITSTATUS, _("exited with status %d")),
        (os.WIFSIGNALED, os.WTERMSIG, _("killed by signal %d")),
        (os.WIFSTOPPED, os.WSTOPSIG, _("stopped by signal %d")),
    )
    for matches, extract, message in decoders:
        if matches(code):
            val = extract(code)
            return message % val, val
    raise ValueError(_("invalid exit code"))
670
674
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter, targetsize = 2**16):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.in_iter = iter(in_iter)
        # Already-fetched but not-yet-consumed data (str in Python 2).
        self.buf = ''
        self.targetsize = int(targetsize)
        if self.targetsize <= 0:
            raise ValueError(_("targetsize must be greater than 0, was %d") %
                             targetsize)
        # Set once in_iter has been exhausted; read() then serves only
        # from self.buf.
        self.iterempty = False

    def fillbuf(self):
        """Ignore target size; read every chunk from iterator until empty."""
        if not self.iterempty:
            # Accumulate via cStringIO to avoid quadratic str appends.
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            for ch in self.in_iter:
                collector.write(ch)
            self.buf = collector.getvalue()
            self.iterempty = True

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and not self.iterempty:
            # Clamp to a multiple of self.targetsize
            targetsize = self.targetsize * ((l // self.targetsize) + 1)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            # Pull chunks until we have at least targetsize bytes or the
            # source runs dry.
            for chunk in self.in_iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                self.iterempty = True
            self.buf = collector.getvalue()
        # buffer() (Python 2) keeps the remainder as a zero-copy view.
        s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s
715
719
def filechunkiter(f, size=65536):
    """Yield successive chunks read from file object f.

    Each chunk is at most size (default 65536) bytes; the final chunk,
    or reads from socket-like objects, may be shorter.  Iteration
    stops at the first empty read.
    """
    while True:
        chunk = f.read(size)
        if not chunk:
            break
        yield chunk
726
730
def makedate():
    """Return (local timestamp, tz offset) for the current moment.

    The offset is seconds away from UTC, honoring DST when the local
    time says DST is in effect.
    """
    now = time.localtime()
    # index 8 of a struct_time is tm_isdst
    if now[8] == 1 and time.daylight:
        offset = time.altzone
    else:
        offset = time.timezone
    return time.mktime(now), offset
734
738
735 def datestr(date=None, format='%c'):
739 def datestr(date=None, format='%c'):
736 """represent a (unixtime, offset) tuple as a localized time.
740 """represent a (unixtime, offset) tuple as a localized time.
737 unixtime is seconds since the epoch, and offset is the time zone's
741 unixtime is seconds since the epoch, and offset is the time zone's
738 number of seconds away from UTC."""
742 number of seconds away from UTC."""
739 t, tz = date or makedate()
743 t, tz = date or makedate()
740 return ("%s %+03d%02d" %
744 return ("%s %+03d%02d" %
741 (time.strftime(format, time.gmtime(float(t) - tz)),
745 (time.strftime(format, time.gmtime(float(t) - tz)),
742 -tz / 3600,
746 -tz / 3600,
743 ((-tz % 3600) / 60)))
747 ((-tz % 3600) / 60)))
744
748
def walkrepos(path):
    '''yield every hg repository under path, recursively.'''
    def raise_on_root(err):
        # Errors on path itself are fatal; errors deeper in the tree
        # (unreadable subdirectories) are silently skipped by os.walk.
        if err.filename == path:
            raise err

    for root, dirs, files in os.walk(path, onerror=raise_on_root):
        if '.hg' in dirs:
            yield root
            # Do not descend into a repository's own subdirectories.
            dirs[:] = []
General Comments 0
You need to be logged in to leave comments. Login now