##// END OF EJS Templates
ui.py: remove revlogopts and (unused) diffcache variables...
Alexis S. L. Carvalho -
r3340:929d0496 default
parent child Browse files
Show More
@@ -1,1760 +1,1760 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
class localrepository(repo.repository):
    """Read/write access to a local (on-disk) Mercurial repository."""
    # Protocol capabilities advertised by this repository; empty for a
    # plain local repo (protocol layers may override).
    capabilities = ()
19
19
    def __del__(self):
        # Drop the transaction handle at teardown so a pending
        # transaction object is not kept alive by the repository.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is None, walk upward from the current directory until a
        directory containing ".hg" is found.  Raises repo.RepoError if
        no repository is found, or if create is set and one already
        exists.
        """
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener reads/writes under .hg, wopener under the working dir
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        # per-repository hgrc overrides the inherited configuration;
        # a missing hgrc is not an error
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # pick revlog format and flags from the [revlog] config section
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches (filled by tags(), nodetags(),
        # wread() and wwrite() respectively)
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88
88
89 def url(self):
89 def url(self):
90 return 'file:' + self.root
90 return 'file:' + self.root
91
91
    def hook(self, name, throw=False, **args):
        """Run every configured hook whose name matches `name`.

        Hook definitions come from the [hooks] config section; entries
        whose command starts with "python:" are called in-process,
        anything else runs as a shell command with HG_* environment
        variables.  Returns the boolean OR of all hook results.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # external hook: run through the shell with the hook
            # arguments exported as HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # both "name" and "name.suffix" entries match; run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
172
172
    # characters that may never appear in a tag name (enforced by tag())
    tag_disallowed = ':\r\n'
174
174
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags live in .hg/localtags and are never committed
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to clobber uncommitted .hgtags edits; status()[:5] is
        # (modified, added, removed, deleted, unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 def tags(self):
216 def tags(self):
217 '''return a mapping of tag to node'''
217 '''return a mapping of tag to node'''
218 if not self.tagscache:
218 if not self.tagscache:
219 self.tagscache = {}
219 self.tagscache = {}
220
220
221 def parsetag(line, context):
221 def parsetag(line, context):
222 if not line:
222 if not line:
223 return
223 return
224 s = l.split(" ", 1)
224 s = l.split(" ", 1)
225 if len(s) != 2:
225 if len(s) != 2:
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
226 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 return
227 return
228 node, key = s
228 node, key = s
229 key = key.strip()
229 key = key.strip()
230 try:
230 try:
231 bin_n = bin(node)
231 bin_n = bin(node)
232 except TypeError:
232 except TypeError:
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
233 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 (context, node))
234 (context, node))
235 return
235 return
236 if bin_n not in self.changelog.nodemap:
236 if bin_n not in self.changelog.nodemap:
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
237 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 (context, key))
238 (context, key))
239 return
239 return
240 self.tagscache[key] = bin_n
240 self.tagscache[key] = bin_n
241
241
242 # read the tags file from each head, ending with the tip,
242 # read the tags file from each head, ending with the tip,
243 # and add each tag found to the map, with "newer" ones
243 # and add each tag found to the map, with "newer" ones
244 # taking precedence
244 # taking precedence
245 heads = self.heads()
245 heads = self.heads()
246 heads.reverse()
246 heads.reverse()
247 fl = self.file(".hgtags")
247 fl = self.file(".hgtags")
248 for node in heads:
248 for node in heads:
249 change = self.changelog.read(node)
249 change = self.changelog.read(node)
250 rev = self.changelog.rev(node)
250 rev = self.changelog.rev(node)
251 fn, ff = self.manifest.find(change[0], '.hgtags')
251 fn, ff = self.manifest.find(change[0], '.hgtags')
252 if fn is None: continue
252 if fn is None: continue
253 count = 0
253 count = 0
254 for l in fl.read(fn).splitlines():
254 for l in fl.read(fn).splitlines():
255 count += 1
255 count += 1
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
256 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
257 (rev, short(node), count))
257 (rev, short(node), count))
258 try:
258 try:
259 f = self.opener("localtags")
259 f = self.opener("localtags")
260 count = 0
260 count = 0
261 for l in f:
261 for l in f:
262 count += 1
262 count += 1
263 parsetag(l, _("localtags, line %d") % count)
263 parsetag(l, _("localtags, line %d") % count)
264 except IOError:
264 except IOError:
265 pass
265 pass
266
266
267 self.tagscache['tip'] = self.changelog.tip()
267 self.tagscache['tip'] = self.changelog.tip()
268
268
269 return self.tagscache
269 return self.tagscache
270
270
271 def tagslist(self):
271 def tagslist(self):
272 '''return a list of tags ordered by revision'''
272 '''return a list of tags ordered by revision'''
273 l = []
273 l = []
274 for t, n in self.tags().items():
274 for t, n in self.tags().items():
275 try:
275 try:
276 r = self.changelog.rev(n)
276 r = self.changelog.rev(n)
277 except:
277 except:
278 r = -2 # sort to the beginning of the list if unknown
278 r = -2 # sort to the beginning of the list if unknown
279 l.append((r, t, n))
279 l.append((r, t, n))
280 l.sort()
280 l.sort()
281 return [(t, n) for r, t, n in l]
281 return [(t, n) for r, t, n in l]
282
282
283 def nodetags(self, node):
283 def nodetags(self, node):
284 '''return the tags associated with a node'''
284 '''return the tags associated with a node'''
285 if not self.nodetagscache:
285 if not self.nodetagscache:
286 self.nodetagscache = {}
286 self.nodetagscache = {}
287 for t, n in self.tags().items():
287 for t, n in self.tags().items():
288 self.nodetagscache.setdefault(n, []).append(t)
288 self.nodetagscache.setdefault(n, []).append(t)
289 return self.nodetagscache.get(node, [])
289 return self.nodetagscache.get(node, [])
290
290
291 def lookup(self, key):
291 def lookup(self, key):
292 try:
292 try:
293 return self.tags()[key]
293 return self.tags()[key]
294 except KeyError:
294 except KeyError:
295 if key == '.':
295 if key == '.':
296 key = self.dirstate.parents()[0]
296 key = self.dirstate.parents()[0]
297 if key == nullid:
297 if key == nullid:
298 raise repo.RepoError(_("no revision checked out"))
298 raise repo.RepoError(_("no revision checked out"))
299 try:
299 try:
300 return self.changelog.lookup(key)
300 return self.changelog.lookup(key)
301 except:
301 except:
302 raise repo.RepoError(_("unknown revision '%s'") % key)
302 raise repo.RepoError(_("unknown revision '%s'") % key)
303
303
304 def dev(self):
304 def dev(self):
305 return os.lstat(self.path).st_dev
305 return os.lstat(self.path).st_dev
306
306
    def local(self):
        # this is a local on-disk repository (remote repo classes
        # presumably return False -- they are outside this view)
        return True
309
309
    def join(self, f):
        """Return f joined under the repository's .hg directory."""
        return os.path.join(self.path, f)
312
312
    def wjoin(self, f):
        """Return f joined under the working directory root."""
        return os.path.join(self.root, f)
315
315
316 def file(self, f):
316 def file(self, f):
317 if f[0] == '/':
317 if f[0] == '/':
318 f = f[1:]
318 f = f[1:]
319 return filelog.filelog(self.opener, f, self.revlogversion)
319 return filelog.filelog(self.opener, f, self.revlogversion)
320
320
    def changectx(self, changeid=None):
        """Return a changectx for changeid (None means working parent,
        per context.changectx -- confirm against that module)."""
        return context.changectx(self, changeid)
323
323
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
326
326
327 def parents(self, changeid=None):
327 def parents(self, changeid=None):
328 '''
328 '''
329 get list of changectxs for parents of changeid or working directory
329 get list of changectxs for parents of changeid or working directory
330 '''
330 '''
331 if changeid is None:
331 if changeid is None:
332 pl = self.dirstate.parents()
332 pl = self.dirstate.parents()
333 else:
333 else:
334 n = self.changelog.lookup(changeid)
334 n = self.changelog.lookup(changeid)
335 pl = self.changelog.parents(n)
335 pl = self.changelog.parents(n)
336 if pl[1] == nullid:
336 if pl[1] == nullid:
337 return [self.changectx(pl[0])]
337 return [self.changectx(pl[0])]
338 return [self.changectx(pl[0]), self.changectx(pl[1])]
338 return [self.changectx(pl[0]), self.changectx(pl[1])]
339
339
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
344
344
    def getcwd(self):
        """Return the current directory relative to the repo root
        (delegated to dirstate)."""
        return self.dirstate.getcwd()
347
347
    def wfile(self, f, mode='r'):
        """Open file f from the working directory with the given mode."""
        return self.wopener(f, mode)
350
350
351 def wread(self, filename):
351 def wread(self, filename):
352 if self.encodepats == None:
352 if self.encodepats == None:
353 l = []
353 l = []
354 for pat, cmd in self.ui.configitems("encode"):
354 for pat, cmd in self.ui.configitems("encode"):
355 mf = util.matcher(self.root, "", [pat], [], [])[1]
355 mf = util.matcher(self.root, "", [pat], [], [])[1]
356 l.append((mf, cmd))
356 l.append((mf, cmd))
357 self.encodepats = l
357 self.encodepats = l
358
358
359 data = self.wopener(filename, 'r').read()
359 data = self.wopener(filename, 'r').read()
360
360
361 for mf, cmd in self.encodepats:
361 for mf, cmd in self.encodepats:
362 if mf(filename):
362 if mf(filename):
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
363 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
364 data = util.filter(data, cmd)
364 data = util.filter(data, cmd)
365 break
365 break
366
366
367 return data
367 return data
368
368
369 def wwrite(self, filename, data, fd=None):
369 def wwrite(self, filename, data, fd=None):
370 if self.decodepats == None:
370 if self.decodepats == None:
371 l = []
371 l = []
372 for pat, cmd in self.ui.configitems("decode"):
372 for pat, cmd in self.ui.configitems("decode"):
373 mf = util.matcher(self.root, "", [pat], [], [])[1]
373 mf = util.matcher(self.root, "", [pat], [], [])[1]
374 l.append((mf, cmd))
374 l.append((mf, cmd))
375 self.decodepats = l
375 self.decodepats = l
376
376
377 for mf, cmd in self.decodepats:
377 for mf, cmd in self.decodepats:
378 if mf(filename):
378 if mf(filename):
379 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
379 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
380 data = util.filter(data, cmd)
380 data = util.filter(data, cmd)
381 break
381 break
382
382
383 if fd:
383 if fd:
384 return fd.write(data)
384 return fd.write(data)
385 return self.wopener(filename, 'w').write(data)
385 return self.wopener(filename, 'w').write(data)
386
386
387 def transaction(self):
387 def transaction(self):
388 tr = self.transhandle
388 tr = self.transhandle
389 if tr != None and tr.running():
389 if tr != None and tr.running():
390 return tr.nest()
390 return tr.nest()
391
391
392 # save dirstate for rollback
392 # save dirstate for rollback
393 try:
393 try:
394 ds = self.opener("dirstate").read()
394 ds = self.opener("dirstate").read()
395 except IOError:
395 except IOError:
396 ds = ""
396 ds = ""
397 self.opener("journal.dirstate", "w").write(ds)
397 self.opener("journal.dirstate", "w").write(ds)
398
398
399 tr = transaction.transaction(self.ui.warn, self.opener,
399 tr = transaction.transaction(self.ui.warn, self.opener,
400 self.join("journal"),
400 self.join("journal"),
401 aftertrans(self.path))
401 aftertrans(self.path))
402 self.transhandle = tr
402 self.transhandle = tr
403 return tr
403 return tr
404
404
    def recover(self):
        """Roll back an interrupted transaction, if a journal exists.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
415
415
    def rollback(self, wlock=None):
        """Undo the last committed transaction using the "undo" journal,
        restoring the saved dirstate as well."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            # restore the dirstate saved when the transaction started
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
428
428
    def wreload(self):
        """Re-read the dirstate from disk (working directory state)."""
        self.dirstate.read()
431
431
432 def reload(self):
432 def reload(self):
433 self.changelog.load()
433 self.changelog.load()
434 self.manifest.load()
434 self.manifest.load()
435 self.tagscache = None
435 self.tagscache = None
436 self.nodetagscache = None
436 self.nodetagscache = None
437
437
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock file under .hg.

        Tries a non-blocking acquire first; if the lock is held and
        `wait` is true, retries with a timeout ([ui] timeout config,
        600 seconds by default).  `acquirefn` is called after a
        successful acquire; `releasefn` is passed to the lock for
        release time.  Returns the lock object.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
454
454
    def lock(self, wait=1):
        """Acquire the repository (store) lock, reloading caches on
        acquisition."""
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
458
458
    def wlock(self, wait=1):
        """Acquire the working-directory lock; the dirstate is written
        on release and re-read on acquisition."""
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
463
463
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new (or existing, if unmodified) filelog node for
        fn, and appends fn to changelist when a new revision is added.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # parent file nodes from each manifest (nullid when absent)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file is a copy/rename: record the source path and the
            # source's revision in the filelog metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
503
503
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files directly against parents p1/p2,
        bypassing the normal status-driven commit path.

        The dirstate is only updated when p1 matches the current first
        dirstate parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])
        changed = []

        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        linkrev = self.changelog.count()
        for f in files:
            try:
                m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                # the file is gone from the working directory: drop it
                # from the manifest (and dirstate, when applicable)
                try:
                    del m1[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
543
543
544 def commit(self, files=None, text="", user=None, date=None,
544 def commit(self, files=None, text="", user=None, date=None,
545 match=util.always, force=False, lock=None, wlock=None,
545 match=util.always, force=False, lock=None, wlock=None,
546 force_editor=False):
546 force_editor=False):
547 commit = []
547 commit = []
548 remove = []
548 remove = []
549 changed = []
549 changed = []
550
550
551 if files:
551 if files:
552 for f in files:
552 for f in files:
553 s = self.dirstate.state(f)
553 s = self.dirstate.state(f)
554 if s in 'nmai':
554 if s in 'nmai':
555 commit.append(f)
555 commit.append(f)
556 elif s == 'r':
556 elif s == 'r':
557 remove.append(f)
557 remove.append(f)
558 else:
558 else:
559 self.ui.warn(_("%s not tracked!\n") % f)
559 self.ui.warn(_("%s not tracked!\n") % f)
560 else:
560 else:
561 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
561 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
562 commit = modified + added
562 commit = modified + added
563 remove = removed
563 remove = removed
564
564
565 p1, p2 = self.dirstate.parents()
565 p1, p2 = self.dirstate.parents()
566 c1 = self.changelog.read(p1)
566 c1 = self.changelog.read(p1)
567 c2 = self.changelog.read(p2)
567 c2 = self.changelog.read(p2)
568 m1 = self.manifest.read(c1[0]).copy()
568 m1 = self.manifest.read(c1[0]).copy()
569 m2 = self.manifest.read(c2[0])
569 m2 = self.manifest.read(c2[0])
570
570
571 if not commit and not remove and not force and p2 == nullid:
571 if not commit and not remove and not force and p2 == nullid:
572 self.ui.status(_("nothing changed\n"))
572 self.ui.status(_("nothing changed\n"))
573 return None
573 return None
574
574
575 xp1 = hex(p1)
575 xp1 = hex(p1)
576 if p2 == nullid: xp2 = ''
576 if p2 == nullid: xp2 = ''
577 else: xp2 = hex(p2)
577 else: xp2 = hex(p2)
578
578
579 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
579 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
580
580
581 if not wlock:
581 if not wlock:
582 wlock = self.wlock()
582 wlock = self.wlock()
583 if not lock:
583 if not lock:
584 lock = self.lock()
584 lock = self.lock()
585 tr = self.transaction()
585 tr = self.transaction()
586
586
587 # check in files
587 # check in files
588 new = {}
588 new = {}
589 linkrev = self.changelog.count()
589 linkrev = self.changelog.count()
590 commit.sort()
590 commit.sort()
591 for f in commit:
591 for f in commit:
592 self.ui.note(f + "\n")
592 self.ui.note(f + "\n")
593 try:
593 try:
594 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
594 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
595 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
595 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
596 except IOError:
596 except IOError:
597 self.ui.warn(_("trouble committing %s!\n") % f)
597 self.ui.warn(_("trouble committing %s!\n") % f)
598 raise
598 raise
599
599
600 # update manifest
600 # update manifest
601 m1.update(new)
601 m1.update(new)
602 for f in remove:
602 for f in remove:
603 if f in m1:
603 if f in m1:
604 del m1[f]
604 del m1[f]
605 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
605 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
606
606
607 # add changeset
607 # add changeset
608 new = new.keys()
608 new = new.keys()
609 new.sort()
609 new.sort()
610
610
611 user = user or self.ui.username()
611 user = user or self.ui.username()
612 if not text or force_editor:
612 if not text or force_editor:
613 edittext = []
613 edittext = []
614 if text:
614 if text:
615 edittext.append(text)
615 edittext.append(text)
616 edittext.append("")
616 edittext.append("")
617 if p2 != nullid:
617 if p2 != nullid:
618 edittext.append("HG: branch merge")
618 edittext.append("HG: branch merge")
619 edittext.extend(["HG: changed %s" % f for f in changed])
619 edittext.extend(["HG: changed %s" % f for f in changed])
620 edittext.extend(["HG: removed %s" % f for f in remove])
620 edittext.extend(["HG: removed %s" % f for f in remove])
621 if not changed and not remove:
621 if not changed and not remove:
622 edittext.append("HG: no files changed")
622 edittext.append("HG: no files changed")
623 edittext.append("")
623 edittext.append("")
624 # run editor in the repository root
624 # run editor in the repository root
625 olddir = os.getcwd()
625 olddir = os.getcwd()
626 os.chdir(self.root)
626 os.chdir(self.root)
627 text = self.ui.edit("\n".join(edittext), user)
627 text = self.ui.edit("\n".join(edittext), user)
628 os.chdir(olddir)
628 os.chdir(olddir)
629
629
630 lines = [line.rstrip() for line in text.rstrip().splitlines()]
630 lines = [line.rstrip() for line in text.rstrip().splitlines()]
631 while lines and not lines[0]:
631 while lines and not lines[0]:
632 del lines[0]
632 del lines[0]
633 if not lines:
633 if not lines:
634 return None
634 return None
635 text = '\n'.join(lines)
635 text = '\n'.join(lines)
636 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
636 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
637 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
637 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
638 parent2=xp2)
638 parent2=xp2)
639 tr.close()
639 tr.close()
640
640
641 self.dirstate.setparents(n)
641 self.dirstate.setparents(n)
642 self.dirstate.update(new, "n")
642 self.dirstate.update(new, "n")
643 self.dirstate.forget(remove)
643 self.dirstate.forget(remove)
644
644
645 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
645 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
646 return n
646 return n
647
647
648 def walk(self, node=None, files=[], match=util.always, badmatch=None):
648 def walk(self, node=None, files=[], match=util.always, badmatch=None):
649 if node:
649 if node:
650 fdict = dict.fromkeys(files)
650 fdict = dict.fromkeys(files)
651 for fn in self.manifest.read(self.changelog.read(node)[0]):
651 for fn in self.manifest.read(self.changelog.read(node)[0]):
652 for ffn in fdict:
652 for ffn in fdict:
653 # match if the file is the exact name or a directory
653 # match if the file is the exact name or a directory
654 if ffn == fn or fn.startswith("%s/" % ffn):
654 if ffn == fn or fn.startswith("%s/" % ffn):
655 del fdict[ffn]
655 del fdict[ffn]
656 break
656 break
657 if match(fn):
657 if match(fn):
658 yield 'm', fn
658 yield 'm', fn
659 for fn in fdict:
659 for fn in fdict:
660 if badmatch and badmatch(fn):
660 if badmatch and badmatch(fn):
661 if match(fn):
661 if match(fn):
662 yield 'b', fn
662 yield 'b', fn
663 else:
663 else:
664 self.ui.warn(_('%s: No such file in rev %s\n') % (
664 self.ui.warn(_('%s: No such file in rev %s\n') % (
665 util.pathto(self.getcwd(), fn), short(node)))
665 util.pathto(self.getcwd(), fn), short(node)))
666 else:
666 else:
667 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
667 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
668 yield src, fn
668 yield src, fn
669
669
670 def status(self, node1=None, node2=None, files=[], match=util.always,
670 def status(self, node1=None, node2=None, files=[], match=util.always,
671 wlock=None, list_ignored=False, list_clean=False):
671 wlock=None, list_ignored=False, list_clean=False):
672 """return status of files between two nodes or node and working directory
672 """return status of files between two nodes or node and working directory
673
673
674 If node1 is None, use the first dirstate parent instead.
674 If node1 is None, use the first dirstate parent instead.
675 If node2 is None, compare node1 with working directory.
675 If node2 is None, compare node1 with working directory.
676 """
676 """
677
677
678 def fcmp(fn, mf):
678 def fcmp(fn, mf):
679 t1 = self.wread(fn)
679 t1 = self.wread(fn)
680 return self.file(fn).cmp(mf.get(fn, nullid), t1)
680 return self.file(fn).cmp(mf.get(fn, nullid), t1)
681
681
682 def mfmatches(node):
682 def mfmatches(node):
683 change = self.changelog.read(node)
683 change = self.changelog.read(node)
684 mf = self.manifest.read(change[0]).copy()
684 mf = self.manifest.read(change[0]).copy()
685 for fn in mf.keys():
685 for fn in mf.keys():
686 if not match(fn):
686 if not match(fn):
687 del mf[fn]
687 del mf[fn]
688 return mf
688 return mf
689
689
690 modified, added, removed, deleted, unknown = [], [], [], [], []
690 modified, added, removed, deleted, unknown = [], [], [], [], []
691 ignored, clean = [], []
691 ignored, clean = [], []
692
692
693 compareworking = False
693 compareworking = False
694 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
694 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
695 compareworking = True
695 compareworking = True
696
696
697 if not compareworking:
697 if not compareworking:
698 # read the manifest from node1 before the manifest from node2,
698 # read the manifest from node1 before the manifest from node2,
699 # so that we'll hit the manifest cache if we're going through
699 # so that we'll hit the manifest cache if we're going through
700 # all the revisions in parent->child order.
700 # all the revisions in parent->child order.
701 mf1 = mfmatches(node1)
701 mf1 = mfmatches(node1)
702
702
703 # are we comparing the working directory?
703 # are we comparing the working directory?
704 if not node2:
704 if not node2:
705 if not wlock:
705 if not wlock:
706 try:
706 try:
707 wlock = self.wlock(wait=0)
707 wlock = self.wlock(wait=0)
708 except lock.LockException:
708 except lock.LockException:
709 wlock = None
709 wlock = None
710 (lookup, modified, added, removed, deleted, unknown,
710 (lookup, modified, added, removed, deleted, unknown,
711 ignored, clean) = self.dirstate.status(files, match,
711 ignored, clean) = self.dirstate.status(files, match,
712 list_ignored, list_clean)
712 list_ignored, list_clean)
713
713
714 # are we comparing working dir against its parent?
714 # are we comparing working dir against its parent?
715 if compareworking:
715 if compareworking:
716 if lookup:
716 if lookup:
717 # do a full compare of any files that might have changed
717 # do a full compare of any files that might have changed
718 mf2 = mfmatches(self.dirstate.parents()[0])
718 mf2 = mfmatches(self.dirstate.parents()[0])
719 for f in lookup:
719 for f in lookup:
720 if fcmp(f, mf2):
720 if fcmp(f, mf2):
721 modified.append(f)
721 modified.append(f)
722 else:
722 else:
723 clean.append(f)
723 clean.append(f)
724 if wlock is not None:
724 if wlock is not None:
725 self.dirstate.update([f], "n")
725 self.dirstate.update([f], "n")
726 else:
726 else:
727 # we are comparing working dir against non-parent
727 # we are comparing working dir against non-parent
728 # generate a pseudo-manifest for the working dir
728 # generate a pseudo-manifest for the working dir
729 # XXX: create it in dirstate.py ?
729 # XXX: create it in dirstate.py ?
730 mf2 = mfmatches(self.dirstate.parents()[0])
730 mf2 = mfmatches(self.dirstate.parents()[0])
731 for f in lookup + modified + added:
731 for f in lookup + modified + added:
732 mf2[f] = ""
732 mf2[f] = ""
733 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
733 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
734 for f in removed:
734 for f in removed:
735 if f in mf2:
735 if f in mf2:
736 del mf2[f]
736 del mf2[f]
737 else:
737 else:
738 # we are comparing two revisions
738 # we are comparing two revisions
739 mf2 = mfmatches(node2)
739 mf2 = mfmatches(node2)
740
740
741 if not compareworking:
741 if not compareworking:
742 # flush lists from dirstate before comparing manifests
742 # flush lists from dirstate before comparing manifests
743 modified, added, clean = [], [], []
743 modified, added, clean = [], [], []
744
744
745 # make sure to sort the files so we talk to the disk in a
745 # make sure to sort the files so we talk to the disk in a
746 # reasonable order
746 # reasonable order
747 mf2keys = mf2.keys()
747 mf2keys = mf2.keys()
748 mf2keys.sort()
748 mf2keys.sort()
749 for fn in mf2keys:
749 for fn in mf2keys:
750 if mf1.has_key(fn):
750 if mf1.has_key(fn):
751 if mf1.flags(fn) != mf2.flags(fn) or \
751 if mf1.flags(fn) != mf2.flags(fn) or \
752 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
752 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
753 modified.append(fn)
753 modified.append(fn)
754 elif list_clean:
754 elif list_clean:
755 clean.append(fn)
755 clean.append(fn)
756 del mf1[fn]
756 del mf1[fn]
757 else:
757 else:
758 added.append(fn)
758 added.append(fn)
759
759
760 removed = mf1.keys()
760 removed = mf1.keys()
761
761
762 # sort and return results:
762 # sort and return results:
763 for l in modified, added, removed, deleted, unknown, ignored, clean:
763 for l in modified, added, removed, deleted, unknown, ignored, clean:
764 l.sort()
764 l.sort()
765 return (modified, added, removed, deleted, unknown, ignored, clean)
765 return (modified, added, removed, deleted, unknown, ignored, clean)
766
766
767 def add(self, list, wlock=None):
767 def add(self, list, wlock=None):
768 if not wlock:
768 if not wlock:
769 wlock = self.wlock()
769 wlock = self.wlock()
770 for f in list:
770 for f in list:
771 p = self.wjoin(f)
771 p = self.wjoin(f)
772 if not os.path.exists(p):
772 if not os.path.exists(p):
773 self.ui.warn(_("%s does not exist!\n") % f)
773 self.ui.warn(_("%s does not exist!\n") % f)
774 elif not os.path.isfile(p):
774 elif not os.path.isfile(p):
775 self.ui.warn(_("%s not added: only files supported currently\n")
775 self.ui.warn(_("%s not added: only files supported currently\n")
776 % f)
776 % f)
777 elif self.dirstate.state(f) in 'an':
777 elif self.dirstate.state(f) in 'an':
778 self.ui.warn(_("%s already tracked!\n") % f)
778 self.ui.warn(_("%s already tracked!\n") % f)
779 else:
779 else:
780 self.dirstate.update([f], "a")
780 self.dirstate.update([f], "a")
781
781
782 def forget(self, list, wlock=None):
782 def forget(self, list, wlock=None):
783 if not wlock:
783 if not wlock:
784 wlock = self.wlock()
784 wlock = self.wlock()
785 for f in list:
785 for f in list:
786 if self.dirstate.state(f) not in 'ai':
786 if self.dirstate.state(f) not in 'ai':
787 self.ui.warn(_("%s not added!\n") % f)
787 self.ui.warn(_("%s not added!\n") % f)
788 else:
788 else:
789 self.dirstate.forget([f])
789 self.dirstate.forget([f])
790
790
791 def remove(self, list, unlink=False, wlock=None):
791 def remove(self, list, unlink=False, wlock=None):
792 if unlink:
792 if unlink:
793 for f in list:
793 for f in list:
794 try:
794 try:
795 util.unlink(self.wjoin(f))
795 util.unlink(self.wjoin(f))
796 except OSError, inst:
796 except OSError, inst:
797 if inst.errno != errno.ENOENT:
797 if inst.errno != errno.ENOENT:
798 raise
798 raise
799 if not wlock:
799 if not wlock:
800 wlock = self.wlock()
800 wlock = self.wlock()
801 for f in list:
801 for f in list:
802 p = self.wjoin(f)
802 p = self.wjoin(f)
803 if os.path.exists(p):
803 if os.path.exists(p):
804 self.ui.warn(_("%s still exists!\n") % f)
804 self.ui.warn(_("%s still exists!\n") % f)
805 elif self.dirstate.state(f) == 'a':
805 elif self.dirstate.state(f) == 'a':
806 self.dirstate.forget([f])
806 self.dirstate.forget([f])
807 elif f not in self.dirstate:
807 elif f not in self.dirstate:
808 self.ui.warn(_("%s not tracked!\n") % f)
808 self.ui.warn(_("%s not tracked!\n") % f)
809 else:
809 else:
810 self.dirstate.update([f], "r")
810 self.dirstate.update([f], "r")
811
811
812 def undelete(self, list, wlock=None):
812 def undelete(self, list, wlock=None):
813 p = self.dirstate.parents()[0]
813 p = self.dirstate.parents()[0]
814 mn = self.changelog.read(p)[0]
814 mn = self.changelog.read(p)[0]
815 m = self.manifest.read(mn)
815 m = self.manifest.read(mn)
816 if not wlock:
816 if not wlock:
817 wlock = self.wlock()
817 wlock = self.wlock()
818 for f in list:
818 for f in list:
819 if self.dirstate.state(f) not in "r":
819 if self.dirstate.state(f) not in "r":
820 self.ui.warn("%s not removed!\n" % f)
820 self.ui.warn("%s not removed!\n" % f)
821 else:
821 else:
822 t = self.file(f).read(m[f])
822 t = self.file(f).read(m[f])
823 self.wwrite(f, t)
823 self.wwrite(f, t)
824 util.set_exec(self.wjoin(f), m.execf(f))
824 util.set_exec(self.wjoin(f), m.execf(f))
825 self.dirstate.update([f], "n")
825 self.dirstate.update([f], "n")
826
826
827 def copy(self, source, dest, wlock=None):
827 def copy(self, source, dest, wlock=None):
828 p = self.wjoin(dest)
828 p = self.wjoin(dest)
829 if not os.path.exists(p):
829 if not os.path.exists(p):
830 self.ui.warn(_("%s does not exist!\n") % dest)
830 self.ui.warn(_("%s does not exist!\n") % dest)
831 elif not os.path.isfile(p):
831 elif not os.path.isfile(p):
832 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
832 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
833 else:
833 else:
834 if not wlock:
834 if not wlock:
835 wlock = self.wlock()
835 wlock = self.wlock()
836 if self.dirstate.state(dest) == '?':
836 if self.dirstate.state(dest) == '?':
837 self.dirstate.update([dest], "a")
837 self.dirstate.update([dest], "a")
838 self.dirstate.copy(source, dest)
838 self.dirstate.copy(source, dest)
839
839
840 def heads(self, start=None):
840 def heads(self, start=None):
841 heads = self.changelog.heads(start)
841 heads = self.changelog.heads(start)
842 # sort the output in rev descending order
842 # sort the output in rev descending order
843 heads = [(-self.changelog.rev(h), h) for h in heads]
843 heads = [(-self.changelog.rev(h), h) for h in heads]
844 heads.sort()
844 heads.sort()
845 return [n for (r, n) in heads]
845 return [n for (r, n) in heads]
846
846
847 # branchlookup returns a dict giving a list of branches for
847 # branchlookup returns a dict giving a list of branches for
848 # each head. A branch is defined as the tag of a node or
848 # each head. A branch is defined as the tag of a node or
849 # the branch of the node's parents. If a node has multiple
849 # the branch of the node's parents. If a node has multiple
850 # branch tags, tags are eliminated if they are visible from other
850 # branch tags, tags are eliminated if they are visible from other
851 # branch tags.
851 # branch tags.
852 #
852 #
853 # So, for this graph: a->b->c->d->e
853 # So, for this graph: a->b->c->d->e
854 # \ /
854 # \ /
855 # aa -----/
855 # aa -----/
856 # a has tag 2.6.12
856 # a has tag 2.6.12
857 # d has tag 2.6.13
857 # d has tag 2.6.13
858 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
858 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
859 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
859 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
860 # from the list.
860 # from the list.
861 #
861 #
862 # It is possible that more than one head will have the same branch tag.
862 # It is possible that more than one head will have the same branch tag.
863 # callers need to check the result for multiple heads under the same
863 # callers need to check the result for multiple heads under the same
864 # branch tag if that is a problem for them (ie checkout of a specific
864 # branch tag if that is a problem for them (ie checkout of a specific
865 # branch).
865 # branch).
866 #
866 #
867 # passing in a specific branch will limit the depth of the search
867 # passing in a specific branch will limit the depth of the search
868 # through the parents. It won't limit the branches returned in the
868 # through the parents. It won't limit the branches returned in the
869 # result though.
869 # result though.
870 def branchlookup(self, heads=None, branch=None):
870 def branchlookup(self, heads=None, branch=None):
871 if not heads:
871 if not heads:
872 heads = self.heads()
872 heads = self.heads()
873 headt = [ h for h in heads ]
873 headt = [ h for h in heads ]
874 chlog = self.changelog
874 chlog = self.changelog
875 branches = {}
875 branches = {}
876 merges = []
876 merges = []
877 seenmerge = {}
877 seenmerge = {}
878
878
879 # traverse the tree once for each head, recording in the branches
879 # traverse the tree once for each head, recording in the branches
880 # dict which tags are visible from this head. The branches
880 # dict which tags are visible from this head. The branches
881 # dict also records which tags are visible from each tag
881 # dict also records which tags are visible from each tag
882 # while we traverse.
882 # while we traverse.
883 while headt or merges:
883 while headt or merges:
884 if merges:
884 if merges:
885 n, found = merges.pop()
885 n, found = merges.pop()
886 visit = [n]
886 visit = [n]
887 else:
887 else:
888 h = headt.pop()
888 h = headt.pop()
889 visit = [h]
889 visit = [h]
890 found = [h]
890 found = [h]
891 seen = {}
891 seen = {}
892 while visit:
892 while visit:
893 n = visit.pop()
893 n = visit.pop()
894 if n in seen:
894 if n in seen:
895 continue
895 continue
896 pp = chlog.parents(n)
896 pp = chlog.parents(n)
897 tags = self.nodetags(n)
897 tags = self.nodetags(n)
898 if tags:
898 if tags:
899 for x in tags:
899 for x in tags:
900 if x == 'tip':
900 if x == 'tip':
901 continue
901 continue
902 for f in found:
902 for f in found:
903 branches.setdefault(f, {})[n] = 1
903 branches.setdefault(f, {})[n] = 1
904 branches.setdefault(n, {})[n] = 1
904 branches.setdefault(n, {})[n] = 1
905 break
905 break
906 if n not in found:
906 if n not in found:
907 found.append(n)
907 found.append(n)
908 if branch in tags:
908 if branch in tags:
909 continue
909 continue
910 seen[n] = 1
910 seen[n] = 1
911 if pp[1] != nullid and n not in seenmerge:
911 if pp[1] != nullid and n not in seenmerge:
912 merges.append((pp[1], [x for x in found]))
912 merges.append((pp[1], [x for x in found]))
913 seenmerge[n] = 1
913 seenmerge[n] = 1
914 if pp[0] != nullid:
914 if pp[0] != nullid:
915 visit.append(pp[0])
915 visit.append(pp[0])
916 # traverse the branches dict, eliminating branch tags from each
916 # traverse the branches dict, eliminating branch tags from each
917 # head that are visible from another branch tag for that head.
917 # head that are visible from another branch tag for that head.
918 out = {}
918 out = {}
919 viscache = {}
919 viscache = {}
920 for h in heads:
920 for h in heads:
921 def visible(node):
921 def visible(node):
922 if node in viscache:
922 if node in viscache:
923 return viscache[node]
923 return viscache[node]
924 ret = {}
924 ret = {}
925 visit = [node]
925 visit = [node]
926 while visit:
926 while visit:
927 x = visit.pop()
927 x = visit.pop()
928 if x in viscache:
928 if x in viscache:
929 ret.update(viscache[x])
929 ret.update(viscache[x])
930 elif x not in ret:
930 elif x not in ret:
931 ret[x] = 1
931 ret[x] = 1
932 if x in branches:
932 if x in branches:
933 visit[len(visit):] = branches[x].keys()
933 visit[len(visit):] = branches[x].keys()
934 viscache[node] = ret
934 viscache[node] = ret
935 return ret
935 return ret
936 if h not in branches:
936 if h not in branches:
937 continue
937 continue
938 # O(n^2), but somewhat limited. This only searches the
938 # O(n^2), but somewhat limited. This only searches the
939 # tags visible from a specific head, not all the tags in the
939 # tags visible from a specific head, not all the tags in the
940 # whole repo.
940 # whole repo.
941 for b in branches[h]:
941 for b in branches[h]:
942 vis = False
942 vis = False
943 for bb in branches[h].keys():
943 for bb in branches[h].keys():
944 if b != bb:
944 if b != bb:
945 if b in visible(bb):
945 if b in visible(bb):
946 vis = True
946 vis = True
947 break
947 break
948 if not vis:
948 if not vis:
949 l = out.setdefault(h, [])
949 l = out.setdefault(h, [])
950 l[len(l):] = self.nodetags(b)
950 l[len(l):] = self.nodetags(b)
951 return out
951 return out
952
952
953 def branches(self, nodes):
953 def branches(self, nodes):
954 if not nodes:
954 if not nodes:
955 nodes = [self.changelog.tip()]
955 nodes = [self.changelog.tip()]
956 b = []
956 b = []
957 for n in nodes:
957 for n in nodes:
958 t = n
958 t = n
959 while 1:
959 while 1:
960 p = self.changelog.parents(n)
960 p = self.changelog.parents(n)
961 if p[1] != nullid or p[0] == nullid:
961 if p[1] != nullid or p[0] == nullid:
962 b.append((t, n, p[0], p[1]))
962 b.append((t, n, p[0], p[1]))
963 break
963 break
964 n = p[0]
964 n = p[0]
965 return b
965 return b
966
966
967 def between(self, pairs):
967 def between(self, pairs):
968 r = []
968 r = []
969
969
970 for top, bottom in pairs:
970 for top, bottom in pairs:
971 n, l, i = top, [], 0
971 n, l, i = top, [], 0
972 f = 1
972 f = 1
973
973
974 while n != bottom:
974 while n != bottom:
975 p = self.changelog.parents(n)[0]
975 p = self.changelog.parents(n)[0]
976 if i == f:
976 if i == f:
977 l.append(n)
977 l.append(n)
978 f = f * 2
978 f = f * 2
979 n = p
979 n = p
980 i += 1
980 i += 1
981
981
982 r.append(l)
982 r.append(l)
983
983
984 return r
984 return r
985
985
986 def findincoming(self, remote, base=None, heads=None, force=False):
986 def findincoming(self, remote, base=None, heads=None, force=False):
987 """Return list of roots of the subsets of missing nodes from remote
987 """Return list of roots of the subsets of missing nodes from remote
988
988
989 If base dict is specified, assume that these nodes and their parents
989 If base dict is specified, assume that these nodes and their parents
990 exist on the remote side and that no child of a node of base exists
990 exist on the remote side and that no child of a node of base exists
991 in both remote and self.
991 in both remote and self.
992 Furthermore base will be updated to include the nodes that exists
992 Furthermore base will be updated to include the nodes that exists
993 in self and remote but no children exists in self and remote.
993 in self and remote but no children exists in self and remote.
994 If a list of heads is specified, return only nodes which are heads
994 If a list of heads is specified, return only nodes which are heads
995 or ancestors of these heads.
995 or ancestors of these heads.
996
996
997 All the ancestors of base are in self and in remote.
997 All the ancestors of base are in self and in remote.
998 All the descendants of the list returned are missing in self.
998 All the descendants of the list returned are missing in self.
999 (and so we know that the rest of the nodes are missing in remote, see
999 (and so we know that the rest of the nodes are missing in remote, see
1000 outgoing)
1000 outgoing)
1001 """
1001 """
1002 m = self.changelog.nodemap
1002 m = self.changelog.nodemap
1003 search = []
1003 search = []
1004 fetch = {}
1004 fetch = {}
1005 seen = {}
1005 seen = {}
1006 seenbranch = {}
1006 seenbranch = {}
1007 if base == None:
1007 if base == None:
1008 base = {}
1008 base = {}
1009
1009
1010 if not heads:
1010 if not heads:
1011 heads = remote.heads()
1011 heads = remote.heads()
1012
1012
1013 if self.changelog.tip() == nullid:
1013 if self.changelog.tip() == nullid:
1014 base[nullid] = 1
1014 base[nullid] = 1
1015 if heads != [nullid]:
1015 if heads != [nullid]:
1016 return [nullid]
1016 return [nullid]
1017 return []
1017 return []
1018
1018
1019 # assume we're closer to the tip than the root
1019 # assume we're closer to the tip than the root
1020 # and start by examining the heads
1020 # and start by examining the heads
1021 self.ui.status(_("searching for changes\n"))
1021 self.ui.status(_("searching for changes\n"))
1022
1022
1023 unknown = []
1023 unknown = []
1024 for h in heads:
1024 for h in heads:
1025 if h not in m:
1025 if h not in m:
1026 unknown.append(h)
1026 unknown.append(h)
1027 else:
1027 else:
1028 base[h] = 1
1028 base[h] = 1
1029
1029
1030 if not unknown:
1030 if not unknown:
1031 return []
1031 return []
1032
1032
1033 req = dict.fromkeys(unknown)
1033 req = dict.fromkeys(unknown)
1034 reqcnt = 0
1034 reqcnt = 0
1035
1035
1036 # search through remote branches
1036 # search through remote branches
1037 # a 'branch' here is a linear segment of history, with four parts:
1037 # a 'branch' here is a linear segment of history, with four parts:
1038 # head, root, first parent, second parent
1038 # head, root, first parent, second parent
1039 # (a branch always has two parents (or none) by definition)
1039 # (a branch always has two parents (or none) by definition)
1040 unknown = remote.branches(unknown)
1040 unknown = remote.branches(unknown)
1041 while unknown:
1041 while unknown:
1042 r = []
1042 r = []
1043 while unknown:
1043 while unknown:
1044 n = unknown.pop(0)
1044 n = unknown.pop(0)
1045 if n[0] in seen:
1045 if n[0] in seen:
1046 continue
1046 continue
1047
1047
1048 self.ui.debug(_("examining %s:%s\n")
1048 self.ui.debug(_("examining %s:%s\n")
1049 % (short(n[0]), short(n[1])))
1049 % (short(n[0]), short(n[1])))
1050 if n[0] == nullid: # found the end of the branch
1050 if n[0] == nullid: # found the end of the branch
1051 pass
1051 pass
1052 elif n in seenbranch:
1052 elif n in seenbranch:
1053 self.ui.debug(_("branch already found\n"))
1053 self.ui.debug(_("branch already found\n"))
1054 continue
1054 continue
1055 elif n[1] and n[1] in m: # do we know the base?
1055 elif n[1] and n[1] in m: # do we know the base?
1056 self.ui.debug(_("found incomplete branch %s:%s\n")
1056 self.ui.debug(_("found incomplete branch %s:%s\n")
1057 % (short(n[0]), short(n[1])))
1057 % (short(n[0]), short(n[1])))
1058 search.append(n) # schedule branch range for scanning
1058 search.append(n) # schedule branch range for scanning
1059 seenbranch[n] = 1
1059 seenbranch[n] = 1
1060 else:
1060 else:
1061 if n[1] not in seen and n[1] not in fetch:
1061 if n[1] not in seen and n[1] not in fetch:
1062 if n[2] in m and n[3] in m:
1062 if n[2] in m and n[3] in m:
1063 self.ui.debug(_("found new changeset %s\n") %
1063 self.ui.debug(_("found new changeset %s\n") %
1064 short(n[1]))
1064 short(n[1]))
1065 fetch[n[1]] = 1 # earliest unknown
1065 fetch[n[1]] = 1 # earliest unknown
1066 for p in n[2:4]:
1066 for p in n[2:4]:
1067 if p in m:
1067 if p in m:
1068 base[p] = 1 # latest known
1068 base[p] = 1 # latest known
1069
1069
1070 for p in n[2:4]:
1070 for p in n[2:4]:
1071 if p not in req and p not in m:
1071 if p not in req and p not in m:
1072 r.append(p)
1072 r.append(p)
1073 req[p] = 1
1073 req[p] = 1
1074 seen[n[0]] = 1
1074 seen[n[0]] = 1
1075
1075
1076 if r:
1076 if r:
1077 reqcnt += 1
1077 reqcnt += 1
1078 self.ui.debug(_("request %d: %s\n") %
1078 self.ui.debug(_("request %d: %s\n") %
1079 (reqcnt, " ".join(map(short, r))))
1079 (reqcnt, " ".join(map(short, r))))
1080 for p in range(0, len(r), 10):
1080 for p in range(0, len(r), 10):
1081 for b in remote.branches(r[p:p+10]):
1081 for b in remote.branches(r[p:p+10]):
1082 self.ui.debug(_("received %s:%s\n") %
1082 self.ui.debug(_("received %s:%s\n") %
1083 (short(b[0]), short(b[1])))
1083 (short(b[0]), short(b[1])))
1084 unknown.append(b)
1084 unknown.append(b)
1085
1085
1086 # do binary search on the branches we found
1086 # do binary search on the branches we found
1087 while search:
1087 while search:
1088 n = search.pop(0)
1088 n = search.pop(0)
1089 reqcnt += 1
1089 reqcnt += 1
1090 l = remote.between([(n[0], n[1])])[0]
1090 l = remote.between([(n[0], n[1])])[0]
1091 l.append(n[1])
1091 l.append(n[1])
1092 p = n[0]
1092 p = n[0]
1093 f = 1
1093 f = 1
1094 for i in l:
1094 for i in l:
1095 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1095 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1096 if i in m:
1096 if i in m:
1097 if f <= 2:
1097 if f <= 2:
1098 self.ui.debug(_("found new branch changeset %s\n") %
1098 self.ui.debug(_("found new branch changeset %s\n") %
1099 short(p))
1099 short(p))
1100 fetch[p] = 1
1100 fetch[p] = 1
1101 base[i] = 1
1101 base[i] = 1
1102 else:
1102 else:
1103 self.ui.debug(_("narrowed branch search to %s:%s\n")
1103 self.ui.debug(_("narrowed branch search to %s:%s\n")
1104 % (short(p), short(i)))
1104 % (short(p), short(i)))
1105 search.append((p, i))
1105 search.append((p, i))
1106 break
1106 break
1107 p, f = i, f * 2
1107 p, f = i, f * 2
1108
1108
1109 # sanity check our fetch list
1109 # sanity check our fetch list
1110 for f in fetch.keys():
1110 for f in fetch.keys():
1111 if f in m:
1111 if f in m:
1112 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1112 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1113
1113
1114 if base.keys() == [nullid]:
1114 if base.keys() == [nullid]:
1115 if force:
1115 if force:
1116 self.ui.warn(_("warning: repository is unrelated\n"))
1116 self.ui.warn(_("warning: repository is unrelated\n"))
1117 else:
1117 else:
1118 raise util.Abort(_("repository is unrelated"))
1118 raise util.Abort(_("repository is unrelated"))
1119
1119
1120 self.ui.debug(_("found new changesets starting at ") +
1120 self.ui.debug(_("found new changesets starting at ") +
1121 " ".join([short(f) for f in fetch]) + "\n")
1121 " ".join([short(f) for f in fetch]) + "\n")
1122
1122
1123 self.ui.debug(_("%d total queries\n") % reqcnt)
1123 self.ui.debug(_("%d total queries\n") % reqcnt)
1124
1124
1125 return fetch.keys()
1125 return fetch.keys()
1126
1126
1127 def findoutgoing(self, remote, base=None, heads=None, force=False):
1127 def findoutgoing(self, remote, base=None, heads=None, force=False):
1128 """Return list of nodes that are roots of subsets not in remote
1128 """Return list of nodes that are roots of subsets not in remote
1129
1129
1130 If base dict is specified, assume that these nodes and their parents
1130 If base dict is specified, assume that these nodes and their parents
1131 exist on the remote side.
1131 exist on the remote side.
1132 If a list of heads is specified, return only nodes which are heads
1132 If a list of heads is specified, return only nodes which are heads
1133 or ancestors of these heads, and return a second element which
1133 or ancestors of these heads, and return a second element which
1134 contains all remote heads which get new children.
1134 contains all remote heads which get new children.
1135 """
1135 """
1136 if base == None:
1136 if base == None:
1137 base = {}
1137 base = {}
1138 self.findincoming(remote, base, heads, force=force)
1138 self.findincoming(remote, base, heads, force=force)
1139
1139
1140 self.ui.debug(_("common changesets up to ")
1140 self.ui.debug(_("common changesets up to ")
1141 + " ".join(map(short, base.keys())) + "\n")
1141 + " ".join(map(short, base.keys())) + "\n")
1142
1142
1143 remain = dict.fromkeys(self.changelog.nodemap)
1143 remain = dict.fromkeys(self.changelog.nodemap)
1144
1144
1145 # prune everything remote has from the tree
1145 # prune everything remote has from the tree
1146 del remain[nullid]
1146 del remain[nullid]
1147 remove = base.keys()
1147 remove = base.keys()
1148 while remove:
1148 while remove:
1149 n = remove.pop(0)
1149 n = remove.pop(0)
1150 if n in remain:
1150 if n in remain:
1151 del remain[n]
1151 del remain[n]
1152 for p in self.changelog.parents(n):
1152 for p in self.changelog.parents(n):
1153 remove.append(p)
1153 remove.append(p)
1154
1154
1155 # find every node whose parents have been pruned
1155 # find every node whose parents have been pruned
1156 subset = []
1156 subset = []
1157 # find every remote head that will get new children
1157 # find every remote head that will get new children
1158 updated_heads = {}
1158 updated_heads = {}
1159 for n in remain:
1159 for n in remain:
1160 p1, p2 = self.changelog.parents(n)
1160 p1, p2 = self.changelog.parents(n)
1161 if p1 not in remain and p2 not in remain:
1161 if p1 not in remain and p2 not in remain:
1162 subset.append(n)
1162 subset.append(n)
1163 if heads:
1163 if heads:
1164 if p1 in heads:
1164 if p1 in heads:
1165 updated_heads[p1] = True
1165 updated_heads[p1] = True
1166 if p2 in heads:
1166 if p2 in heads:
1167 updated_heads[p2] = True
1167 updated_heads[p2] = True
1168
1168
1169 # this is the set of all roots we have to push
1169 # this is the set of all roots we have to push
1170 if heads:
1170 if heads:
1171 return subset, updated_heads.keys()
1171 return subset, updated_heads.keys()
1172 else:
1172 else:
1173 return subset
1173 return subset
1174
1174
1175 def pull(self, remote, heads=None, force=False, lock=None):
1175 def pull(self, remote, heads=None, force=False, lock=None):
1176 mylock = False
1176 mylock = False
1177 if not lock:
1177 if not lock:
1178 lock = self.lock()
1178 lock = self.lock()
1179 mylock = True
1179 mylock = True
1180
1180
1181 try:
1181 try:
1182 fetch = self.findincoming(remote, force=force)
1182 fetch = self.findincoming(remote, force=force)
1183 if fetch == [nullid]:
1183 if fetch == [nullid]:
1184 self.ui.status(_("requesting all changes\n"))
1184 self.ui.status(_("requesting all changes\n"))
1185
1185
1186 if not fetch:
1186 if not fetch:
1187 self.ui.status(_("no changes found\n"))
1187 self.ui.status(_("no changes found\n"))
1188 return 0
1188 return 0
1189
1189
1190 if heads is None:
1190 if heads is None:
1191 cg = remote.changegroup(fetch, 'pull')
1191 cg = remote.changegroup(fetch, 'pull')
1192 else:
1192 else:
1193 cg = remote.changegroupsubset(fetch, heads, 'pull')
1193 cg = remote.changegroupsubset(fetch, heads, 'pull')
1194 return self.addchangegroup(cg, 'pull', remote.url())
1194 return self.addchangegroup(cg, 'pull', remote.url())
1195 finally:
1195 finally:
1196 if mylock:
1196 if mylock:
1197 lock.release()
1197 lock.release()
1198
1198
1199 def push(self, remote, force=False, revs=None):
1199 def push(self, remote, force=False, revs=None):
1200 # there are two ways to push to remote repo:
1200 # there are two ways to push to remote repo:
1201 #
1201 #
1202 # addchangegroup assumes local user can lock remote
1202 # addchangegroup assumes local user can lock remote
1203 # repo (local filesystem, old ssh servers).
1203 # repo (local filesystem, old ssh servers).
1204 #
1204 #
1205 # unbundle assumes local user cannot lock remote repo (new ssh
1205 # unbundle assumes local user cannot lock remote repo (new ssh
1206 # servers, http servers).
1206 # servers, http servers).
1207
1207
1208 if remote.capable('unbundle'):
1208 if remote.capable('unbundle'):
1209 return self.push_unbundle(remote, force, revs)
1209 return self.push_unbundle(remote, force, revs)
1210 return self.push_addchangegroup(remote, force, revs)
1210 return self.push_addchangegroup(remote, force, revs)
1211
1211
1212 def prepush(self, remote, force, revs):
1212 def prepush(self, remote, force, revs):
1213 base = {}
1213 base = {}
1214 remote_heads = remote.heads()
1214 remote_heads = remote.heads()
1215 inc = self.findincoming(remote, base, remote_heads, force=force)
1215 inc = self.findincoming(remote, base, remote_heads, force=force)
1216 if not force and inc:
1216 if not force and inc:
1217 self.ui.warn(_("abort: unsynced remote changes!\n"))
1217 self.ui.warn(_("abort: unsynced remote changes!\n"))
1218 self.ui.status(_("(did you forget to sync?"
1218 self.ui.status(_("(did you forget to sync?"
1219 " use push -f to force)\n"))
1219 " use push -f to force)\n"))
1220 return None, 1
1220 return None, 1
1221
1221
1222 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1222 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1223 if revs is not None:
1223 if revs is not None:
1224 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1224 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1225 else:
1225 else:
1226 bases, heads = update, self.changelog.heads()
1226 bases, heads = update, self.changelog.heads()
1227
1227
1228 if not bases:
1228 if not bases:
1229 self.ui.status(_("no changes found\n"))
1229 self.ui.status(_("no changes found\n"))
1230 return None, 1
1230 return None, 1
1231 elif not force:
1231 elif not force:
1232 # FIXME we don't properly detect creation of new heads
1232 # FIXME we don't properly detect creation of new heads
1233 # in the push -r case, assume the user knows what he's doing
1233 # in the push -r case, assume the user knows what he's doing
1234 if not revs and len(remote_heads) < len(heads) \
1234 if not revs and len(remote_heads) < len(heads) \
1235 and remote_heads != [nullid]:
1235 and remote_heads != [nullid]:
1236 self.ui.warn(_("abort: push creates new remote branches!\n"))
1236 self.ui.warn(_("abort: push creates new remote branches!\n"))
1237 self.ui.status(_("(did you forget to merge?"
1237 self.ui.status(_("(did you forget to merge?"
1238 " use push -f to force)\n"))
1238 " use push -f to force)\n"))
1239 return None, 1
1239 return None, 1
1240
1240
1241 if revs is None:
1241 if revs is None:
1242 cg = self.changegroup(update, 'push')
1242 cg = self.changegroup(update, 'push')
1243 else:
1243 else:
1244 cg = self.changegroupsubset(update, revs, 'push')
1244 cg = self.changegroupsubset(update, revs, 'push')
1245 return cg, remote_heads
1245 return cg, remote_heads
1246
1246
1247 def push_addchangegroup(self, remote, force, revs):
1247 def push_addchangegroup(self, remote, force, revs):
1248 lock = remote.lock()
1248 lock = remote.lock()
1249
1249
1250 ret = self.prepush(remote, force, revs)
1250 ret = self.prepush(remote, force, revs)
1251 if ret[0] is not None:
1251 if ret[0] is not None:
1252 cg, remote_heads = ret
1252 cg, remote_heads = ret
1253 return remote.addchangegroup(cg, 'push', self.url())
1253 return remote.addchangegroup(cg, 'push', self.url())
1254 return ret[1]
1254 return ret[1]
1255
1255
1256 def push_unbundle(self, remote, force, revs):
1256 def push_unbundle(self, remote, force, revs):
1257 # local repo finds heads on server, finds out what revs it
1257 # local repo finds heads on server, finds out what revs it
1258 # must push. once revs transferred, if server finds it has
1258 # must push. once revs transferred, if server finds it has
1259 # different heads (someone else won commit/push race), server
1259 # different heads (someone else won commit/push race), server
1260 # aborts.
1260 # aborts.
1261
1261
1262 ret = self.prepush(remote, force, revs)
1262 ret = self.prepush(remote, force, revs)
1263 if ret[0] is not None:
1263 if ret[0] is not None:
1264 cg, remote_heads = ret
1264 cg, remote_heads = ret
1265 if force: remote_heads = ['force']
1265 if force: remote_heads = ['force']
1266 return remote.unbundle(cg, remote_heads, 'push')
1266 return remote.unbundle(cg, remote_heads, 'push')
1267 return ret[1]
1267 return ret[1]
1268
1268
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        # NOTE: this method uses Python 2-only constructs throughout
        # (dicts as sets, dict.has_key, cmp-style sort comparators).
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (no 'nonlocal' in Python 2).
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1539
1539
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes -- changelog nodes the recipient already has; everything
                     after them (per cl.nodesbetween) is sent.
        source    -- operation tag forwarded to the outgoing hooks.
        Returns a util.chunkbuffer wrapping the generated chunk stream.
        """

        # let hooks veto the operation before any work is done
        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # changelog revision numbers being transmitted; used below to decide
        # which manifest/file revisions link to an outgoing changeset
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # changelog nodes are their own link nodes
            return x

        def gennodelst(revlog):
            # yield every node in revlog whose linkrev points at one of the
            # outgoing changesets
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # build a callback that records into changedfileset the files
            # touched by each changeset, as its chunk is generated
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # NOTE(review): c[3] is presumably the list of files touched
                # by this changeset -- confirm against changelog.read()
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a manifest/file node back to the changelog node it is
            # linked to
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog group first; collecting the changed files is a side
            # effect of generating the changeset chunks
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest group
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file; files with no outgoing
            # revisions are skipped entirely (no filename chunk emitted)
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal that no more groups are left
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1605
1605
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source -- a file-like object containing the changegroup chunk stream
                  (read via changegroup.chunkiter/getchunk).
        srctype -- origin tag ('push', 'pull', ...) passed to the hooks.
        url -- origin URL, also passed to the hooks.
        """

        def csmap(x):
            # per-chunk progress callback; returns the linkrev the next
            # changeset will get
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node to its revision number (linkrev lookup
            # for manifest/file groups)
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last changelog rev before/after the group is added
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the list of file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # flush the appendfile data to the real changelog
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still abort the transaction (throw=True)
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # post-transaction notification hooks: one 'changegroup' for the
            # whole group, one 'incoming' per new changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1701
1701
1702
1702
1703 def stream_in(self, remote):
1703 def stream_in(self, remote):
1704 fp = remote.stream_out()
1704 fp = remote.stream_out()
1705 resp = int(fp.readline())
1705 resp = int(fp.readline())
1706 if resp != 0:
1706 if resp != 0:
1707 raise util.Abort(_('operation forbidden by server'))
1707 raise util.Abort(_('operation forbidden by server'))
1708 self.ui.status(_('streaming all changes\n'))
1708 self.ui.status(_('streaming all changes\n'))
1709 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1709 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1710 self.ui.status(_('%d files to transfer, %s of data\n') %
1710 self.ui.status(_('%d files to transfer, %s of data\n') %
1711 (total_files, util.bytecount(total_bytes)))
1711 (total_files, util.bytecount(total_bytes)))
1712 start = time.time()
1712 start = time.time()
1713 for i in xrange(total_files):
1713 for i in xrange(total_files):
1714 name, size = fp.readline().split('\0', 1)
1714 name, size = fp.readline().split('\0', 1)
1715 size = int(size)
1715 size = int(size)
1716 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1716 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1717 ofp = self.opener(name, 'w')
1717 ofp = self.opener(name, 'w')
1718 for chunk in util.filechunkiter(fp, limit=size):
1718 for chunk in util.filechunkiter(fp, limit=size):
1719 ofp.write(chunk)
1719 ofp.write(chunk)
1720 ofp.close()
1720 ofp.close()
1721 elapsed = time.time() - start
1721 elapsed = time.time() - start
1722 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1722 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1723 (util.bytecount(total_bytes), elapsed,
1723 (util.bytecount(total_bytes), elapsed,
1724 util.bytecount(total_bytes / elapsed)))
1724 util.bytecount(total_bytes / elapsed)))
1725 self.reload()
1725 self.reload()
1726 return len(self.heads()) + 1
1726 return len(self.heads()) + 1
1727
1727
1728 def clone(self, remote, heads=[], stream=False):
1728 def clone(self, remote, heads=[], stream=False):
1729 '''clone remote repository.
1729 '''clone remote repository.
1730
1730
1731 keyword arguments:
1731 keyword arguments:
1732 heads: list of revs to clone (forces use of pull)
1732 heads: list of revs to clone (forces use of pull)
1733 stream: use streaming clone if possible'''
1733 stream: use streaming clone if possible'''
1734
1734
1735 # now, all clients that can request uncompressed clones can
1735 # now, all clients that can request uncompressed clones can
1736 # read repo formats supported by all servers that can serve
1736 # read repo formats supported by all servers that can serve
1737 # them.
1737 # them.
1738
1738
1739 # if revlog format changes, client will have to check version
1739 # if revlog format changes, client will have to check version
1740 # and format flags on "stream" capability, and use
1740 # and format flags on "stream" capability, and use
1741 # uncompressed only if compatible.
1741 # uncompressed only if compatible.
1742
1742
1743 if stream and not heads and remote.capable('stream'):
1743 if stream and not heads and remote.capable('stream'):
1744 return self.stream_in(remote)
1744 return self.stream_in(remote)
1745 return self.pull(remote, heads)
1745 return self.pull(remote, heads)
1746
1746
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames the transaction journal files under
    *base* to their "undo" counterparts, preserving them for rollback."""
    root = base
    def a():
        # journal -> undo, journal.dirstate -> undo.dirstate
        for src, dst in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(root, src), os.path.join(root, dst))
    return a
1755
1755
def instance(ui, path, create):
    """Open (or, if *create* is true, create) the local repository at *path*,
    stripping any leading 'file' scheme from the location first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1758
1758
def islocal(path):
    """Return True: repositories handled by this module are always local."""
    return True
@@ -1,306 +1,304 b''
1 # ui.py - user interface bits for mercurial
1 # ui.py - user interface bits for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from i18n import gettext as _
8 from i18n import gettext as _
9 from demandload import *
9 from demandload import *
10 demandload(globals(), "errno getpass os re socket sys tempfile")
10 demandload(globals(), "errno getpass os re socket sys tempfile")
11 demandload(globals(), "ConfigParser mdiff templater traceback util")
11 demandload(globals(), "ConfigParser mdiff templater traceback util")
12
12
class ui(object):
    """User interface and configuration access for mercurial.

    A ui object is either a root (parentui is None), which reads the hgrc
    files itself, or a child, which copies its parent's configuration and
    delegates unknown attributes to it via __getattr__.
    """
    def __init__(self, verbose=False, debug=False, quiet=False,
                 interactive=True, traceback=False, parentui=None):
        self.overlay = {}          # (section, name) -> value set at runtime
        self.header = []           # pending header lines for write()
        self.prev_header = []      # last header actually printed
        if parentui is None:
            # this is the parent of all ui children
            self.parentui = None
            self.readhooks = []
            self.cdata = ConfigParser.SafeConfigParser()
            self.readconfig(util.rcpath())

            # seed output flags from config, then fold in constructor args
            self.quiet = self.configbool("ui", "quiet")
            self.verbose = self.configbool("ui", "verbose")
            self.debugflag = self.configbool("ui", "debug")
            self.interactive = self.configbool("ui", "interactive", True)
            self.traceback = traceback

            self.updateopts(verbose, debug, quiet, interactive)
        else:
            # parentui may point to an ui object which is already a child
            self.parentui = parentui.parentui or parentui
            self.readhooks = self.parentui.readhooks[:]
            parent_cdata = self.parentui.cdata
            self.cdata = ConfigParser.SafeConfigParser(parent_cdata.defaults())
            # make interpolation work
            for section in parent_cdata.sections():
                self.cdata.add_section(section)
                for name, value in parent_cdata.items(section, raw=True):
                    self.cdata.set(section, name, value)

    def __getattr__(self, key):
        # delegate any attribute not set on this child to the root ui
        return getattr(self.parentui, key)

    def updateopts(self, verbose=False, debug=False, quiet=False,
                   interactive=True, traceback=False, config=[]):
        """Fold command-line flags into the current output settings and
        apply --config section.name=value overrides."""
        # debug implies verbose; verbose/debug defeat quiet
        self.quiet = (self.quiet or quiet) and not verbose and not debug
        self.verbose = ((self.verbose or verbose) or debug) and not self.quiet
        self.debugflag = (self.debugflag or debug)
        self.interactive = (self.interactive and interactive)
        self.traceback = self.traceback or traceback
        for cfg in config:
            try:
                name, value = cfg.split('=', 1)
                section, name = name.split('.', 1)
                if not self.cdata.has_section(section):
                    self.cdata.add_section(section)
                if not section or not name:
                    raise IndexError
                self.cdata.set(section, name, value)
            except (IndexError, ValueError):
                raise util.Abort(_('malformed --config option: %s') % cfg)

    def readconfig(self, fn, root=None):
        """Read one hgrc file (or a list of them) into self.cdata and run
        the registered read hooks."""
        if isinstance(fn, basestring):
            fn = [fn]
        for f in fn:
            try:
                self.cdata.read(f)
            except ConfigParser.ParsingError, inst:
                raise util.Abort(_("Failed to parse %s\n%s") % (f, inst))
        # translate paths relative to root (or home) into absolute paths
        if root is None:
            root = os.path.expanduser('~')
        for name, path in self.configitems("paths"):
            if path and "://" not in path and not os.path.isabs(path):
                self.cdata.set("paths", name, os.path.join(root, path))
        for hook in self.readhooks:
            hook(self)

    def addreadhook(self, hook):
        # hook(ui) will be called after every readconfig()
        self.readhooks.append(hook)

    def setconfig(self, section, name, val):
        # runtime override; consulted before cdata in config()/configbool()
        self.overlay[(section, name)] = val

    def config(self, section, name, default=None):
        """Return a config value as a string: overlay first, then this
        ui's cdata, then the parent chain, then *default*."""
        if self.overlay.has_key((section, name)):
            return self.overlay[(section, name)]
        if self.cdata.has_option(section, name):
            try:
                return self.cdata.get(section, name)
            except ConfigParser.InterpolationError, inst:
                raise util.Abort(_("Error in configuration section [%s] "
                                   "parameter '%s':\n%s")
                                 % (section, name, inst))
        if self.parentui is None:
            return default
        else:
            return self.parentui.config(section, name, default)

    def configlist(self, section, name, default=None):
        """Return a list of comma/space separated strings"""
        result = self.config(section, name)
        if result is None:
            result = default or []
        if isinstance(result, basestring):
            result = result.replace(",", " ").split()
        return result

    def configbool(self, section, name, default=False):
        """Like config(), but coerce the value to a boolean."""
        if self.overlay.has_key((section, name)):
            return self.overlay[(section, name)]
        if self.cdata.has_option(section, name):
            try:
                return self.cdata.getboolean(section, name)
            except ConfigParser.InterpolationError, inst:
                raise util.Abort(_("Error in configuration section [%s] "
                                   "parameter '%s':\n%s")
                                 % (section, name, inst))
        if self.parentui is None:
            return default
        else:
            return self.parentui.configbool(section, name, default)

    def has_config(self, section):
        '''tell whether section exists in config.'''
        return self.cdata.has_section(section)

    def configitems(self, section):
        """Return the sorted (name, value) pairs of *section*, with this
        ui's own values overriding the parent's."""
        items = {}
        if self.parentui is not None:
            items = dict(self.parentui.configitems(section))
        if self.cdata.has_section(section):
            try:
                items.update(dict(self.cdata.items(section)))
            except ConfigParser.InterpolationError, inst:
                raise util.Abort(_("Error in configuration section [%s]:\n%s")
                                 % (section, inst))
        x = items.items()
        x.sort()
        return x

    def walkconfig(self, seen=None):
        """Yield (section, name, value) for every config entry, overlay
        first, skipping entries already recorded in *seen*."""
        if seen is None:
            seen = {}
        for (section, name), value in self.overlay.iteritems():
            yield section, name, value
            seen[section, name] = 1
        for section in self.cdata.sections():
            try:
                for name, value in self.cdata.items(section):
                    if (section, name) in seen: continue
                    # keep the output one-line-per-entry
                    yield section, name, value.replace('\n', '\\n')
                    seen[section, name] = 1
            except ConfigParser.InterpolationError, inst:
                raise util.Abort(_("Error in configuration section [%s]:\n%s")
                                 % (section, inst))
        if self.parentui is not None:
            for parent in self.parentui.walkconfig(seen):
                yield parent

    def extensions(self):
        """Return [extensions] items with ~ expanded in each path value."""
        result = self.configitems("extensions")
        for i, (key, value) in enumerate(result):
            if value:
                result[i] = (key, os.path.expanduser(value))
        return result

    def hgignorefiles(self):
        """Return the list of ignore files named by ui.ignore / ui.ignore.*,
        with ~ expanded."""
        result = []
        for key, value in self.configitems("ui"):
            if key == 'ignore' or key.startswith('ignore.'):
                result.append(os.path.expanduser(value))
        return result

    def configrevlog(self):
        """Return the [revlog] section as a dict with lowercased keys."""
        result = {}
        for key, value in self.configitems("revlog"):
            result[key.lower()] = value
        return result

    def username(self):
        """Return default username to be used in commits.

        Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
        and stop searching if one of these is set.
        Abort if found username is an empty string to force specifying
        the commit user elsewhere, e.g. with line option or repo hgrc.
        If not found, use ($LOGNAME or $USER or $LNAME or
        $USERNAME) +"@full.hostname".
        """
        user = os.environ.get("HGUSER")
        if user is None:
            user = self.config("ui", "username")
        if user is None:
            user = os.environ.get("EMAIL")
        if user is None:
            try:
                user = '%s@%s' % (util.getuser(), socket.getfqdn())
            except KeyError:
                raise util.Abort(_("Please specify a username."))
        return user

    def shortuser(self, user):
        """Return a short representation of a user name or email address."""
        if not self.verbose: user = util.shortuser(user)
        return user

    def expandpath(self, loc, default=None):
        """Return repository location relative to cwd or from [paths]"""
        # URLs and existing directories are taken literally
        if "://" in loc or os.path.isdir(loc):
            return loc

        path = self.config("paths", loc)
        if not path and default is not None:
            path = self.config("paths", default)
        return path or loc

    def write(self, *args):
        """Write *args* to stdout, flushing any pending header first
        (and only when it differs from the last header printed)."""
        if self.header:
            if self.header != self.prev_header:
                self.prev_header = self.header
                self.write(*self.header)
            self.header = []
        for a in args:
            sys.stdout.write(str(a))

    def write_header(self, *args):
        # queue header text; it is emitted lazily by the next write()
        for a in args:
            self.header.append(str(a))

    def write_err(self, *args):
        """Write *args* to stderr, flushing stdout first so output stays
        ordered; broken-pipe errors are ignored."""
        try:
            if not sys.stdout.closed: sys.stdout.flush()
            for a in args:
                sys.stderr.write(str(a))
        except IOError, inst:
            if inst.errno != errno.EPIPE:
                raise
General Comments 0
You need to be logged in to leave comments. Login now