##// END OF EJS Templates
merge with main
Thomas Arendsen Hein -
r4582:7de7a80e merge default
parent child Browse files
Show More
@@ -1,76 +1,76 b''
1 # extensions.py - extension handling for mercurial
1 # extensions.py - extension handling for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import imp, os
8 import imp, os
9 import commands, hg, util, sys
9 import commands, hg, util, sys
10 from i18n import _
10 from i18n import _
11
11
12 _extensions = {}
12 _extensions = {}
13
13
14 def find(name):
14 def find(name):
15 '''return module with given extension name'''
15 '''return module with given extension name'''
16 try:
16 try:
17 return _extensions[name]
17 return _extensions[name]
18 except KeyError:
18 except KeyError:
19 for k, v in _extensions.iteritems():
19 for k, v in _extensions.iteritems():
20 if k.endswith('.' + name) or k.endswith('/' + name) or v == name:
20 if k.endswith('.' + name) or k.endswith('/' + name):
21 return sys.modules[v]
21 return v
22 raise KeyError(name)
22 raise KeyError(name)
23
23
24 def load(ui, name, path):
24 def load(ui, name, path):
25 if name in _extensions:
25 if name in _extensions:
26 return
26 return
27 if path:
27 if path:
28 # the module will be loaded in sys.modules
28 # the module will be loaded in sys.modules
29 # choose an unique name so that it doesn't
29 # choose an unique name so that it doesn't
30 # conflicts with other modules
30 # conflicts with other modules
31 module_name = "hgext_%s" % name.replace('.', '_')
31 module_name = "hgext_%s" % name.replace('.', '_')
32 if os.path.isdir(path):
32 if os.path.isdir(path):
33 # module/__init__.py style
33 # module/__init__.py style
34 d, f = os.path.split(path)
34 d, f = os.path.split(path)
35 fd, fpath, desc = imp.find_module(f, [d])
35 fd, fpath, desc = imp.find_module(f, [d])
36 mod = imp.load_module(module_name, fd, fpath, desc)
36 mod = imp.load_module(module_name, fd, fpath, desc)
37 else:
37 else:
38 mod = imp.load_source(module_name, path)
38 mod = imp.load_source(module_name, path)
39 else:
39 else:
40 def importh(name):
40 def importh(name):
41 mod = __import__(name)
41 mod = __import__(name)
42 components = name.split('.')
42 components = name.split('.')
43 for comp in components[1:]:
43 for comp in components[1:]:
44 mod = getattr(mod, comp)
44 mod = getattr(mod, comp)
45 return mod
45 return mod
46 try:
46 try:
47 mod = importh("hgext.%s" % name)
47 mod = importh("hgext.%s" % name)
48 except ImportError:
48 except ImportError:
49 mod = importh(name)
49 mod = importh(name)
50 _extensions[name] = mod
50 _extensions[name] = mod
51
51
52 uisetup = getattr(mod, 'uisetup', None)
52 uisetup = getattr(mod, 'uisetup', None)
53 if uisetup:
53 if uisetup:
54 uisetup(ui)
54 uisetup(ui)
55 reposetup = getattr(mod, 'reposetup', None)
55 reposetup = getattr(mod, 'reposetup', None)
56 if reposetup:
56 if reposetup:
57 hg.repo_setup_hooks.append(reposetup)
57 hg.repo_setup_hooks.append(reposetup)
58 cmdtable = getattr(mod, 'cmdtable', {})
58 cmdtable = getattr(mod, 'cmdtable', {})
59 overrides = [cmd for cmd in cmdtable if cmd in commands.table]
59 overrides = [cmd for cmd in cmdtable if cmd in commands.table]
60 if overrides:
60 if overrides:
61 ui.warn(_("extension '%s' overrides commands: %s\n")
61 ui.warn(_("extension '%s' overrides commands: %s\n")
62 % (name, " ".join(overrides)))
62 % (name, " ".join(overrides)))
63 commands.table.update(cmdtable)
63 commands.table.update(cmdtable)
64
64
65 def loadall(ui):
65 def loadall(ui):
66 for name, path in ui.extensions():
66 for name, path in ui.extensions():
67 try:
67 try:
68 load(ui, name, path)
68 load(ui, name, path)
69 except (util.SignalInterrupt, KeyboardInterrupt):
69 except (util.SignalInterrupt, KeyboardInterrupt):
70 raise
70 raise
71 except Exception, inst:
71 except Exception, inst:
72 ui.warn(_("*** failed to import extension %s: %s\n") %
72 ui.warn(_("*** failed to import extension %s: %s\n") %
73 (name, inst))
73 (name, inst))
74 if ui.print_exc():
74 if ui.print_exc():
75 return 1
75 return 1
76
76
@@ -1,1960 +1,1969 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 self.path = path
23 self.path = path
24 self.root = os.path.realpath(path)
24 self.root = os.path.realpath(path)
25 self.path = os.path.join(self.root, ".hg")
25 self.path = os.path.join(self.root, ".hg")
26 self.origroot = path
26 self.origroot = path
27 self.opener = util.opener(self.path)
27 self.opener = util.opener(self.path)
28 self.wopener = util.opener(self.root)
28 self.wopener = util.opener(self.root)
29
29
30 if not os.path.isdir(self.path):
30 if not os.path.isdir(self.path):
31 if create:
31 if create:
32 if not os.path.exists(path):
32 if not os.path.exists(path):
33 os.mkdir(path)
33 os.mkdir(path)
34 os.mkdir(self.path)
34 os.mkdir(self.path)
35 requirements = ["revlogv1"]
35 requirements = ["revlogv1"]
36 if parentui.configbool('format', 'usestore', True):
36 if parentui.configbool('format', 'usestore', True):
37 os.mkdir(os.path.join(self.path, "store"))
37 os.mkdir(os.path.join(self.path, "store"))
38 requirements.append("store")
38 requirements.append("store")
39 # create an invalid changelog
39 # create an invalid changelog
40 self.opener("00changelog.i", "a").write(
40 self.opener("00changelog.i", "a").write(
41 '\0\0\0\2' # represents revlogv2
41 '\0\0\0\2' # represents revlogv2
42 ' dummy changelog to prevent using the old repo layout'
42 ' dummy changelog to prevent using the old repo layout'
43 )
43 )
44 reqfile = self.opener("requires", "w")
44 reqfile = self.opener("requires", "w")
45 for r in requirements:
45 for r in requirements:
46 reqfile.write("%s\n" % r)
46 reqfile.write("%s\n" % r)
47 reqfile.close()
47 reqfile.close()
48 else:
48 else:
49 raise repo.RepoError(_("repository %s not found") % path)
49 raise repo.RepoError(_("repository %s not found") % path)
50 elif create:
50 elif create:
51 raise repo.RepoError(_("repository %s already exists") % path)
51 raise repo.RepoError(_("repository %s already exists") % path)
52 else:
52 else:
53 # find requirements
53 # find requirements
54 try:
54 try:
55 requirements = self.opener("requires").read().splitlines()
55 requirements = self.opener("requires").read().splitlines()
56 except IOError, inst:
56 except IOError, inst:
57 if inst.errno != errno.ENOENT:
57 if inst.errno != errno.ENOENT:
58 raise
58 raise
59 requirements = []
59 requirements = []
60 # check them
60 # check them
61 for r in requirements:
61 for r in requirements:
62 if r not in self.supported:
62 if r not in self.supported:
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64
64
65 # setup store
65 # setup store
66 if "store" in requirements:
66 if "store" in requirements:
67 self.encodefn = util.encodefilename
67 self.encodefn = util.encodefilename
68 self.decodefn = util.decodefilename
68 self.decodefn = util.decodefilename
69 self.spath = os.path.join(self.path, "store")
69 self.spath = os.path.join(self.path, "store")
70 else:
70 else:
71 self.encodefn = lambda x: x
71 self.encodefn = lambda x: x
72 self.decodefn = lambda x: x
72 self.decodefn = lambda x: x
73 self.spath = self.path
73 self.spath = self.path
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75
75
76 self.ui = ui.ui(parentui=parentui)
76 self.ui = ui.ui(parentui=parentui)
77 try:
77 try:
78 self.ui.readconfig(self.join("hgrc"), self.root)
78 self.ui.readconfig(self.join("hgrc"), self.root)
79 except IOError:
79 except IOError:
80 pass
80 pass
81
81
82 self.changelog = changelog.changelog(self.sopener)
83 self.sopener.defversion = self.changelog.version
84 self.manifest = manifest.manifest(self.sopener)
85
86 fallback = self.ui.config('ui', 'fallbackencoding')
82 fallback = self.ui.config('ui', 'fallbackencoding')
87 if fallback:
83 if fallback:
88 util._fallbackencoding = fallback
84 util._fallbackencoding = fallback
89
85
90 self.tagscache = None
86 self.tagscache = None
91 self.branchcache = None
87 self.branchcache = None
92 self.nodetagscache = None
88 self.nodetagscache = None
93 self.filterpats = {}
89 self.filterpats = {}
94 self.transhandle = None
90 self.transhandle = None
95
91
96 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
92 def __getattr__(self, name):
93 if name == 'changelog':
94 self.changelog = changelog.changelog(self.sopener)
95 self.sopener.defversion = self.changelog.version
96 return self.changelog
97 if name == 'manifest':
98 self.changelog
99 self.manifest = manifest.manifest(self.sopener)
100 return self.manifest
101 if name == 'dirstate':
102 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
103 return self.dirstate
104 else:
105 raise AttributeError, name
97
106
98 def url(self):
107 def url(self):
99 return 'file:' + self.root
108 return 'file:' + self.root
100
109
101 def hook(self, name, throw=False, **args):
110 def hook(self, name, throw=False, **args):
102 def callhook(hname, funcname):
111 def callhook(hname, funcname):
103 '''call python hook. hook is callable object, looked up as
112 '''call python hook. hook is callable object, looked up as
104 name in python module. if callable returns "true", hook
113 name in python module. if callable returns "true", hook
105 fails, else passes. if hook raises exception, treated as
114 fails, else passes. if hook raises exception, treated as
106 hook failure. exception propagates if throw is "true".
115 hook failure. exception propagates if throw is "true".
107
116
108 reason for "true" meaning "hook failed" is so that
117 reason for "true" meaning "hook failed" is so that
109 unmodified commands (e.g. mercurial.commands.update) can
118 unmodified commands (e.g. mercurial.commands.update) can
110 be run as hooks without wrappers to convert return values.'''
119 be run as hooks without wrappers to convert return values.'''
111
120
112 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
113 obj = funcname
122 obj = funcname
114 if not callable(obj):
123 if not callable(obj):
115 d = funcname.rfind('.')
124 d = funcname.rfind('.')
116 if d == -1:
125 if d == -1:
117 raise util.Abort(_('%s hook is invalid ("%s" not in '
126 raise util.Abort(_('%s hook is invalid ("%s" not in '
118 'a module)') % (hname, funcname))
127 'a module)') % (hname, funcname))
119 modname = funcname[:d]
128 modname = funcname[:d]
120 try:
129 try:
121 obj = __import__(modname)
130 obj = __import__(modname)
122 except ImportError:
131 except ImportError:
123 try:
132 try:
124 # extensions are loaded with hgext_ prefix
133 # extensions are loaded with hgext_ prefix
125 obj = __import__("hgext_%s" % modname)
134 obj = __import__("hgext_%s" % modname)
126 except ImportError:
135 except ImportError:
127 raise util.Abort(_('%s hook is invalid '
136 raise util.Abort(_('%s hook is invalid '
128 '(import of "%s" failed)') %
137 '(import of "%s" failed)') %
129 (hname, modname))
138 (hname, modname))
130 try:
139 try:
131 for p in funcname.split('.')[1:]:
140 for p in funcname.split('.')[1:]:
132 obj = getattr(obj, p)
141 obj = getattr(obj, p)
133 except AttributeError, err:
142 except AttributeError, err:
134 raise util.Abort(_('%s hook is invalid '
143 raise util.Abort(_('%s hook is invalid '
135 '("%s" is not defined)') %
144 '("%s" is not defined)') %
136 (hname, funcname))
145 (hname, funcname))
137 if not callable(obj):
146 if not callable(obj):
138 raise util.Abort(_('%s hook is invalid '
147 raise util.Abort(_('%s hook is invalid '
139 '("%s" is not callable)') %
148 '("%s" is not callable)') %
140 (hname, funcname))
149 (hname, funcname))
141 try:
150 try:
142 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
143 except (KeyboardInterrupt, util.SignalInterrupt):
152 except (KeyboardInterrupt, util.SignalInterrupt):
144 raise
153 raise
145 except Exception, exc:
154 except Exception, exc:
146 if isinstance(exc, util.Abort):
155 if isinstance(exc, util.Abort):
147 self.ui.warn(_('error: %s hook failed: %s\n') %
156 self.ui.warn(_('error: %s hook failed: %s\n') %
148 (hname, exc.args[0]))
157 (hname, exc.args[0]))
149 else:
158 else:
150 self.ui.warn(_('error: %s hook raised an exception: '
159 self.ui.warn(_('error: %s hook raised an exception: '
151 '%s\n') % (hname, exc))
160 '%s\n') % (hname, exc))
152 if throw:
161 if throw:
153 raise
162 raise
154 self.ui.print_exc()
163 self.ui.print_exc()
155 return True
164 return True
156 if r:
165 if r:
157 if throw:
166 if throw:
158 raise util.Abort(_('%s hook failed') % hname)
167 raise util.Abort(_('%s hook failed') % hname)
159 self.ui.warn(_('warning: %s hook failed\n') % hname)
168 self.ui.warn(_('warning: %s hook failed\n') % hname)
160 return r
169 return r
161
170
162 def runhook(name, cmd):
171 def runhook(name, cmd):
163 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
164 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
165 r = util.system(cmd, environ=env, cwd=self.root)
174 r = util.system(cmd, environ=env, cwd=self.root)
166 if r:
175 if r:
167 desc, r = util.explain_exit(r)
176 desc, r = util.explain_exit(r)
168 if throw:
177 if throw:
169 raise util.Abort(_('%s hook %s') % (name, desc))
178 raise util.Abort(_('%s hook %s') % (name, desc))
170 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
171 return r
180 return r
172
181
173 r = False
182 r = False
174 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
175 if hname.split(".", 1)[0] == name and cmd]
184 if hname.split(".", 1)[0] == name and cmd]
176 hooks.sort()
185 hooks.sort()
177 for hname, cmd in hooks:
186 for hname, cmd in hooks:
178 if callable(cmd):
187 if callable(cmd):
179 r = callhook(hname, cmd) or r
188 r = callhook(hname, cmd) or r
180 elif cmd.startswith('python:'):
189 elif cmd.startswith('python:'):
181 r = callhook(hname, cmd[7:].strip()) or r
190 r = callhook(hname, cmd[7:].strip()) or r
182 else:
191 else:
183 r = runhook(hname, cmd) or r
192 r = runhook(hname, cmd) or r
184 return r
193 return r
185
194
186 tag_disallowed = ':\r\n'
195 tag_disallowed = ':\r\n'
187
196
188 def _tag(self, name, node, message, local, user, date, parent=None):
197 def _tag(self, name, node, message, local, user, date, parent=None):
189 use_dirstate = parent is None
198 use_dirstate = parent is None
190
199
191 for c in self.tag_disallowed:
200 for c in self.tag_disallowed:
192 if c in name:
201 if c in name:
193 raise util.Abort(_('%r cannot be used in a tag name') % c)
202 raise util.Abort(_('%r cannot be used in a tag name') % c)
194
203
195 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
196
205
197 if local:
206 if local:
198 # local tags are stored in the current charset
207 # local tags are stored in the current charset
199 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
200 self.hook('tag', node=hex(node), tag=name, local=local)
209 self.hook('tag', node=hex(node), tag=name, local=local)
201 return
210 return
202
211
203 # committed tags are stored in UTF-8
212 # committed tags are stored in UTF-8
204 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
205 if use_dirstate:
214 if use_dirstate:
206 self.wfile('.hgtags', 'ab').write(line)
215 self.wfile('.hgtags', 'ab').write(line)
207 else:
216 else:
208 ntags = self.filectx('.hgtags', parent).data()
217 ntags = self.filectx('.hgtags', parent).data()
209 self.wfile('.hgtags', 'ab').write(ntags + line)
218 self.wfile('.hgtags', 'ab').write(ntags + line)
210 if use_dirstate and self.dirstate.state('.hgtags') == '?':
219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
211 self.add(['.hgtags'])
220 self.add(['.hgtags'])
212
221
213 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
214
223
215 self.hook('tag', node=hex(node), tag=name, local=local)
224 self.hook('tag', node=hex(node), tag=name, local=local)
216
225
217 return tagnode
226 return tagnode
218
227
219 def tag(self, name, node, message, local, user, date):
228 def tag(self, name, node, message, local, user, date):
220 '''tag a revision with a symbolic name.
229 '''tag a revision with a symbolic name.
221
230
222 if local is True, the tag is stored in a per-repository file.
231 if local is True, the tag is stored in a per-repository file.
223 otherwise, it is stored in the .hgtags file, and a new
232 otherwise, it is stored in the .hgtags file, and a new
224 changeset is committed with the change.
233 changeset is committed with the change.
225
234
226 keyword arguments:
235 keyword arguments:
227
236
228 local: whether to store tag in non-version-controlled file
237 local: whether to store tag in non-version-controlled file
229 (default False)
238 (default False)
230
239
231 message: commit message to use if committing
240 message: commit message to use if committing
232
241
233 user: name of user to use if committing
242 user: name of user to use if committing
234
243
235 date: date tuple to use if committing'''
244 date: date tuple to use if committing'''
236
245
237 for x in self.status()[:5]:
246 for x in self.status()[:5]:
238 if '.hgtags' in x:
247 if '.hgtags' in x:
239 raise util.Abort(_('working copy of .hgtags is changed '
248 raise util.Abort(_('working copy of .hgtags is changed '
240 '(please commit .hgtags manually)'))
249 '(please commit .hgtags manually)'))
241
250
242
251
243 self._tag(name, node, message, local, user, date)
252 self._tag(name, node, message, local, user, date)
244
253
245 def tags(self):
254 def tags(self):
246 '''return a mapping of tag to node'''
255 '''return a mapping of tag to node'''
247 if self.tagscache:
256 if self.tagscache:
248 return self.tagscache
257 return self.tagscache
249
258
250 globaltags = {}
259 globaltags = {}
251
260
252 def readtags(lines, fn):
261 def readtags(lines, fn):
253 filetags = {}
262 filetags = {}
254 count = 0
263 count = 0
255
264
256 def warn(msg):
265 def warn(msg):
257 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
258
267
259 for l in lines:
268 for l in lines:
260 count += 1
269 count += 1
261 if not l:
270 if not l:
262 continue
271 continue
263 s = l.split(" ", 1)
272 s = l.split(" ", 1)
264 if len(s) != 2:
273 if len(s) != 2:
265 warn(_("cannot parse entry"))
274 warn(_("cannot parse entry"))
266 continue
275 continue
267 node, key = s
276 node, key = s
268 key = util.tolocal(key.strip()) # stored in UTF-8
277 key = util.tolocal(key.strip()) # stored in UTF-8
269 try:
278 try:
270 bin_n = bin(node)
279 bin_n = bin(node)
271 except TypeError:
280 except TypeError:
272 warn(_("node '%s' is not well formed") % node)
281 warn(_("node '%s' is not well formed") % node)
273 continue
282 continue
274 if bin_n not in self.changelog.nodemap:
283 if bin_n not in self.changelog.nodemap:
275 warn(_("tag '%s' refers to unknown node") % key)
284 warn(_("tag '%s' refers to unknown node") % key)
276 continue
285 continue
277
286
278 h = []
287 h = []
279 if key in filetags:
288 if key in filetags:
280 n, h = filetags[key]
289 n, h = filetags[key]
281 h.append(n)
290 h.append(n)
282 filetags[key] = (bin_n, h)
291 filetags[key] = (bin_n, h)
283
292
284 for k,nh in filetags.items():
293 for k,nh in filetags.items():
285 if k not in globaltags:
294 if k not in globaltags:
286 globaltags[k] = nh
295 globaltags[k] = nh
287 continue
296 continue
288 # we prefer the global tag if:
297 # we prefer the global tag if:
289 # it supercedes us OR
298 # it supercedes us OR
290 # mutual supercedes and it has a higher rank
299 # mutual supercedes and it has a higher rank
291 # otherwise we win because we're tip-most
300 # otherwise we win because we're tip-most
292 an, ah = nh
301 an, ah = nh
293 bn, bh = globaltags[k]
302 bn, bh = globaltags[k]
294 if bn != an and an in bh and \
303 if bn != an and an in bh and \
295 (bn not in ah or len(bh) > len(ah)):
304 (bn not in ah or len(bh) > len(ah)):
296 an = bn
305 an = bn
297 ah.extend([n for n in bh if n not in ah])
306 ah.extend([n for n in bh if n not in ah])
298 globaltags[k] = an, ah
307 globaltags[k] = an, ah
299
308
300 # read the tags file from each head, ending with the tip
309 # read the tags file from each head, ending with the tip
301 f = None
310 f = None
302 for rev, node, fnode in self._hgtagsnodes():
311 for rev, node, fnode in self._hgtagsnodes():
303 f = (f and f.filectx(fnode) or
312 f = (f and f.filectx(fnode) or
304 self.filectx('.hgtags', fileid=fnode))
313 self.filectx('.hgtags', fileid=fnode))
305 readtags(f.data().splitlines(), f)
314 readtags(f.data().splitlines(), f)
306
315
307 try:
316 try:
308 data = util.fromlocal(self.opener("localtags").read())
317 data = util.fromlocal(self.opener("localtags").read())
309 # localtags are stored in the local character set
318 # localtags are stored in the local character set
310 # while the internal tag table is stored in UTF-8
319 # while the internal tag table is stored in UTF-8
311 readtags(data.splitlines(), "localtags")
320 readtags(data.splitlines(), "localtags")
312 except IOError:
321 except IOError:
313 pass
322 pass
314
323
315 self.tagscache = {}
324 self.tagscache = {}
316 for k,nh in globaltags.items():
325 for k,nh in globaltags.items():
317 n = nh[0]
326 n = nh[0]
318 if n != nullid:
327 if n != nullid:
319 self.tagscache[k] = n
328 self.tagscache[k] = n
320 self.tagscache['tip'] = self.changelog.tip()
329 self.tagscache['tip'] = self.changelog.tip()
321
330
322 return self.tagscache
331 return self.tagscache
323
332
324 def _hgtagsnodes(self):
333 def _hgtagsnodes(self):
325 heads = self.heads()
334 heads = self.heads()
326 heads.reverse()
335 heads.reverse()
327 last = {}
336 last = {}
328 ret = []
337 ret = []
329 for node in heads:
338 for node in heads:
330 c = self.changectx(node)
339 c = self.changectx(node)
331 rev = c.rev()
340 rev = c.rev()
332 try:
341 try:
333 fnode = c.filenode('.hgtags')
342 fnode = c.filenode('.hgtags')
334 except revlog.LookupError:
343 except revlog.LookupError:
335 continue
344 continue
336 ret.append((rev, node, fnode))
345 ret.append((rev, node, fnode))
337 if fnode in last:
346 if fnode in last:
338 ret[last[fnode]] = None
347 ret[last[fnode]] = None
339 last[fnode] = len(ret) - 1
348 last[fnode] = len(ret) - 1
340 return [item for item in ret if item]
349 return [item for item in ret if item]
341
350
342 def tagslist(self):
351 def tagslist(self):
343 '''return a list of tags ordered by revision'''
352 '''return a list of tags ordered by revision'''
344 l = []
353 l = []
345 for t, n in self.tags().items():
354 for t, n in self.tags().items():
346 try:
355 try:
347 r = self.changelog.rev(n)
356 r = self.changelog.rev(n)
348 except:
357 except:
349 r = -2 # sort to the beginning of the list if unknown
358 r = -2 # sort to the beginning of the list if unknown
350 l.append((r, t, n))
359 l.append((r, t, n))
351 l.sort()
360 l.sort()
352 return [(t, n) for r, t, n in l]
361 return [(t, n) for r, t, n in l]
353
362
354 def nodetags(self, node):
363 def nodetags(self, node):
355 '''return the tags associated with a node'''
364 '''return the tags associated with a node'''
356 if not self.nodetagscache:
365 if not self.nodetagscache:
357 self.nodetagscache = {}
366 self.nodetagscache = {}
358 for t, n in self.tags().items():
367 for t, n in self.tags().items():
359 self.nodetagscache.setdefault(n, []).append(t)
368 self.nodetagscache.setdefault(n, []).append(t)
360 return self.nodetagscache.get(node, [])
369 return self.nodetagscache.get(node, [])
361
370
362 def _branchtags(self):
371 def _branchtags(self):
363 partial, last, lrev = self._readbranchcache()
372 partial, last, lrev = self._readbranchcache()
364
373
365 tiprev = self.changelog.count() - 1
374 tiprev = self.changelog.count() - 1
366 if lrev != tiprev:
375 if lrev != tiprev:
367 self._updatebranchcache(partial, lrev+1, tiprev+1)
376 self._updatebranchcache(partial, lrev+1, tiprev+1)
368 self._writebranchcache(partial, self.changelog.tip(), tiprev)
377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
369
378
370 return partial
379 return partial
371
380
372 def branchtags(self):
381 def branchtags(self):
373 if self.branchcache is not None:
382 if self.branchcache is not None:
374 return self.branchcache
383 return self.branchcache
375
384
376 self.branchcache = {} # avoid recursion in changectx
385 self.branchcache = {} # avoid recursion in changectx
377 partial = self._branchtags()
386 partial = self._branchtags()
378
387
379 # the branch cache is stored on disk as UTF-8, but in the local
388 # the branch cache is stored on disk as UTF-8, but in the local
380 # charset internally
389 # charset internally
381 for k, v in partial.items():
390 for k, v in partial.items():
382 self.branchcache[util.tolocal(k)] = v
391 self.branchcache[util.tolocal(k)] = v
383 return self.branchcache
392 return self.branchcache
384
393
385 def _readbranchcache(self):
394 def _readbranchcache(self):
386 partial = {}
395 partial = {}
387 try:
396 try:
388 f = self.opener("branch.cache")
397 f = self.opener("branch.cache")
389 lines = f.read().split('\n')
398 lines = f.read().split('\n')
390 f.close()
399 f.close()
391 except (IOError, OSError):
400 except (IOError, OSError):
392 return {}, nullid, nullrev
401 return {}, nullid, nullrev
393
402
394 try:
403 try:
395 last, lrev = lines.pop(0).split(" ", 1)
404 last, lrev = lines.pop(0).split(" ", 1)
396 last, lrev = bin(last), int(lrev)
405 last, lrev = bin(last), int(lrev)
397 if not (lrev < self.changelog.count() and
406 if not (lrev < self.changelog.count() and
398 self.changelog.node(lrev) == last): # sanity check
407 self.changelog.node(lrev) == last): # sanity check
399 # invalidate the cache
408 # invalidate the cache
400 raise ValueError('Invalid branch cache: unknown tip')
409 raise ValueError('Invalid branch cache: unknown tip')
401 for l in lines:
410 for l in lines:
402 if not l: continue
411 if not l: continue
403 node, label = l.split(" ", 1)
412 node, label = l.split(" ", 1)
404 partial[label.strip()] = bin(node)
413 partial[label.strip()] = bin(node)
405 except (KeyboardInterrupt, util.SignalInterrupt):
414 except (KeyboardInterrupt, util.SignalInterrupt):
406 raise
415 raise
407 except Exception, inst:
416 except Exception, inst:
408 if self.ui.debugflag:
417 if self.ui.debugflag:
409 self.ui.warn(str(inst), '\n')
418 self.ui.warn(str(inst), '\n')
410 partial, last, lrev = {}, nullid, nullrev
419 partial, last, lrev = {}, nullid, nullrev
411 return partial, last, lrev
420 return partial, last, lrev
412
421
413 def _writebranchcache(self, branches, tip, tiprev):
422 def _writebranchcache(self, branches, tip, tiprev):
414 try:
423 try:
415 f = self.opener("branch.cache", "w", atomictemp=True)
424 f = self.opener("branch.cache", "w", atomictemp=True)
416 f.write("%s %s\n" % (hex(tip), tiprev))
425 f.write("%s %s\n" % (hex(tip), tiprev))
417 for label, node in branches.iteritems():
426 for label, node in branches.iteritems():
418 f.write("%s %s\n" % (hex(node), label))
427 f.write("%s %s\n" % (hex(node), label))
419 f.rename()
428 f.rename()
420 except (IOError, OSError):
429 except (IOError, OSError):
421 pass
430 pass
422
431
423 def _updatebranchcache(self, partial, start, end):
432 def _updatebranchcache(self, partial, start, end):
424 for r in xrange(start, end):
433 for r in xrange(start, end):
425 c = self.changectx(r)
434 c = self.changectx(r)
426 b = c.branch()
435 b = c.branch()
427 partial[b] = c.node()
436 partial[b] = c.node()
428
437
429 def lookup(self, key):
438 def lookup(self, key):
430 if key == '.':
439 if key == '.':
431 key, second = self.dirstate.parents()
440 key, second = self.dirstate.parents()
432 if key == nullid:
441 if key == nullid:
433 raise repo.RepoError(_("no revision checked out"))
442 raise repo.RepoError(_("no revision checked out"))
434 if second != nullid:
443 if second != nullid:
435 self.ui.warn(_("warning: working directory has two parents, "
444 self.ui.warn(_("warning: working directory has two parents, "
436 "tag '.' uses the first\n"))
445 "tag '.' uses the first\n"))
437 elif key == 'null':
446 elif key == 'null':
438 return nullid
447 return nullid
439 n = self.changelog._match(key)
448 n = self.changelog._match(key)
440 if n:
449 if n:
441 return n
450 return n
442 if key in self.tags():
451 if key in self.tags():
443 return self.tags()[key]
452 return self.tags()[key]
444 if key in self.branchtags():
453 if key in self.branchtags():
445 return self.branchtags()[key]
454 return self.branchtags()[key]
446 n = self.changelog._partialmatch(key)
455 n = self.changelog._partialmatch(key)
447 if n:
456 if n:
448 return n
457 return n
449 raise repo.RepoError(_("unknown revision '%s'") % key)
458 raise repo.RepoError(_("unknown revision '%s'") % key)
450
459
451 def dev(self):
460 def dev(self):
452 return os.lstat(self.path).st_dev
461 return os.lstat(self.path).st_dev
453
462
454 def local(self):
463 def local(self):
455 return True
464 return True
456
465
457 def join(self, f):
466 def join(self, f):
458 return os.path.join(self.path, f)
467 return os.path.join(self.path, f)
459
468
460 def sjoin(self, f):
469 def sjoin(self, f):
461 f = self.encodefn(f)
470 f = self.encodefn(f)
462 return os.path.join(self.spath, f)
471 return os.path.join(self.spath, f)
463
472
464 def wjoin(self, f):
473 def wjoin(self, f):
465 return os.path.join(self.root, f)
474 return os.path.join(self.root, f)
466
475
def file(self, f):
    """Return the filelog storing the history of tracked file *f*."""
    # tolerate an accidental leading slash on the tracked name
    if f[0] == '/':
        f = f[1:]
    return filelog.filelog(self.sopener, f)
471
480
def changectx(self, changeid=None):
    """Wrap *changeid* in a changectx bound to this repository."""
    return context.changectx(self, changeid)
474
483
def workingctx(self):
    """Return a context object for the working directory."""
    return context.workingctx(self)
477
486
def parents(self, changeid=None):
    """
    get list of changectxs for parents of changeid or working directory
    """
    if changeid is None:
        pl = self.dirstate.parents()
    else:
        node = self.changelog.lookup(changeid)
        pl = self.changelog.parents(node)
    # the second parent is omitted when it is the null revision
    ctxs = [self.changectx(pl[0])]
    if pl[1] != nullid:
        ctxs.append(self.changectx(pl[1]))
    return ctxs
490
499
def filectx(self, path, changeid=None, fileid=None):
    """Return a filectx for *path*.

    changeid can be a changeset revision, node, or tag; fileid can be
    a file revision or node.
    """
    return context.filectx(self, path, changeid, fileid)
495
504
def getcwd(self):
    """Return the current working directory as seen by the dirstate."""
    return self.dirstate.getcwd()
498
507
def pathto(self, f, cwd=None):
    """Delegate to the dirstate to render *f* relative to *cwd*."""
    return self.dirstate.pathto(f, cwd)
501
510
def wfile(self, f, mode='r'):
    """Open working-directory file *f* with the given *mode*."""
    return self.wopener(f, mode)
504
513
505 def _link(self, f):
514 def _link(self, f):
506 return os.path.islink(self.wjoin(f))
515 return os.path.islink(self.wjoin(f))
507
516
def _filter(self, filter, filename, data):
    """Run *data* through the first configured *filter* command whose
    pattern matches *filename*; compiled matchers are cached per
    filter section in self.filterpats."""
    if filter not in self.filterpats:
        pairs = []
        for pat, cmd in self.ui.configitems(filter):
            matchfn = util.matcher(self.root, "", [pat], [], [])[1]
            pairs.append((matchfn, cmd))
        self.filterpats[filter] = pairs

    for matchfn, cmd in self.filterpats[filter]:
        if matchfn(filename):
            self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
            data = util.filter(data, cmd)
            break

    return data
523
532
def wread(self, filename):
    """Read *filename* from the working dir and apply "encode" filters.

    A symlink is read as its target string rather than file content.
    """
    if self._link(filename):
        raw = os.readlink(self.wjoin(filename))
    else:
        raw = self.wopener(filename, 'r').read()
    return self._filter("encode", filename, raw)
530
539
def wwrite(self, filename, data, flags):
    """Write *data* to *filename* in the working directory.

    *data* is first run through the configured "decode" filters.  If
    *flags* contains "l", the file is created as a symlink pointing at
    *data*; otherwise a regular file is written and its executable bit
    is set from the presence of "x" in *flags*.
    """
    data = self._filter("decode", filename, data)
    if "l" in flags:
        f = self.wjoin(filename)
        try:
            # remove any existing file or link so the symlink can be made
            os.unlink(f)
        except OSError:
            pass
        d = os.path.dirname(f)
        if not os.path.exists(d):
            os.makedirs(d)
        os.symlink(data, f)
    else:
        try:
            # replace a stale symlink with a regular file
            if self._link(filename):
                os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_exec(self.wjoin(filename), "x" in flags)
551
560
def wwritedata(self, filename, data):
    """Return *data* passed through the configured "decode" filters."""
    return self._filter("decode", filename, data)
554
563
def transaction(self):
    """Open a new transaction on the store, or nest into a running one.

    The current dirstate is saved to journal.dirstate so rollback can
    restore it; on transaction close the journal files are renamed to
    the undo files (see aftertrans).
    """
    tr = self.transhandle
    # was "tr != None": use identity comparison for None (PEP 8)
    if tr is not None and tr.running():
        # a transaction is already active: join it
        return tr.nest()

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)

    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames))
    self.transhandle = tr
    return tr
574
583
def recover(self):
    """Roll back an interrupted transaction, if one is present.

    Returns True when a journal was found and rolled back, else False.
    """
    l = self.lock()
    if not os.path.exists(self.sjoin("journal")):
        self.ui.warn(_("no interrupted transaction available\n"))
        return False
    self.ui.status(_("rolling back interrupted transaction\n"))
    transaction.rollback(self.sopener, self.sjoin("journal"))
    self.reload()
    return True
585
594
def rollback(self, wlock=None, lock=None):
    """Undo the most recent transaction using the saved undo files."""
    if not wlock:
        wlock = self.wlock()
    if not lock:
        lock = self.lock()
    undofile = self.sjoin("undo")
    if not os.path.exists(undofile):
        self.ui.warn(_("no rollback information available\n"))
        return
    self.ui.status(_("rolling back last transaction\n"))
    transaction.rollback(self.sopener, undofile)
    util.rename(self.join("undo.dirstate"), self.join("dirstate"))
    self.reload()
    self.wreload()
599
608
def wreload(self):
    """Re-read the dirstate from disk."""
    self.dirstate.reload()
602
611
def reload(self):
    """Re-read changelog and manifest, dropping cached tag data."""
    self.changelog.load()
    self.manifest.load()
    # tag caches derive from the changelog; invalidate them
    self.tagscache = None
    self.nodetagscache = None
608
617
def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
            desc=None):
    """Acquire the lock file *lockname*.

    A non-blocking acquire is tried first; if the lock is held and
    *wait* is true, a warning naming the holder is printed and the
    acquire is retried with the configured ui.timeout (default 600
    seconds).  *releasefn* runs on release, *acquirefn* immediately
    after acquisition; *desc* is used in warning messages.
    """
    try:
        # timeout 0: fail immediately if someone else holds the lock
        l = lock.lock(lockname, 0, releasefn, desc=desc)
    except lock.LockHeld, inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                      releasefn, desc=desc)
    if acquirefn:
        acquirefn()
    return l
624
633
def lock(self, wait=1):
    """Acquire the store lock; caches are reloaded once it is held."""
    lockpath = self.sjoin("lock")
    return self.do_lock(lockpath, wait, acquirefn=self.reload,
                        desc=_('repository %s') % self.origroot)
628
637
def wlock(self, wait=1):
    """Acquire the working-directory lock.

    The dirstate is written on release and reloaded on acquisition.
    """
    lockpath = self.join("wlock")
    return self.do_lock(lockpath, wait, self.dirstate.write,
                        self.wreload,
                        desc=_('working directory of %s') % self.origroot)
633
642
def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
    """
    commit an individual file as part of a larger transaction

    fn: name of the file being committed
    manifest1/manifest2: manifests of the two commit parents
    linkrev: changelog revision the new filelog entry will link to
    transaction: open store transaction to append to
    changelist: list that fn is appended to when new content is stored

    Returns the filelog node for the file (new, or the existing parent
    node when the file is unchanged).
    """

    t = self.wread(fn)
    fl = self.file(fn)
    fp1 = manifest1.get(fn, nullid)
    fp2 = manifest2.get(fn, nullid)

    meta = {}
    cp = self.dirstate.copied(fn)
    if cp:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #
        meta["copy"] = cp
        if not manifest2: # not a branch merge
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
            fp2 = nullid
        elif fp2 != nullid: # copied on remote side
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
        elif fp1 != nullid: # copied on local side, reversed
            meta["copyrev"] = hex(manifest2.get(cp))
            fp2 = fp1
        else: # directory rename
            meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
    elif fp2 != nullid:
        # is one parent an ancestor of the other?
        fpa = fl.ancestor(fp1, fp2)
        if fpa == fp1:
            fp1, fp2 = fp2, nullid
        elif fpa == fp2:
            fp2 = nullid

    # is the file unmodified from the parent? report existing entry
    if fp2 == nullid and not fl.cmp(fp1, t):
        return fp1

    changelist.append(fn)
    return fl.add(t, meta, transaction, linkrev, fp1, fp2)
693
702
def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra=None):
    """Commit *files* with an explicit parent pair, bypassing the
    dirstate file lists (raw backend used by import-style operations).

    When p1 is None the dirstate supplies both parents.  Returns the
    result of self.commit().
    """
    # was "extra={}": a mutable default is shared across calls; use a
    # None sentinel and create a fresh dict per call instead
    if extra is None:
        extra = {}
    if p1 is None:
        p1, p2 = self.dirstate.parents()
    return self.commit(files=files, text=text, user=user, date=date,
                       p1=p1, p2=p2, wlock=wlock, extra=extra)
699
708
def commit(self, files=None, text="", user=None, date=None,
           match=util.always, force=False, lock=None, wlock=None,
           force_editor=False, p1=None, p2=None, extra={}):
    """Create a new changeset from the working directory.

    When p1 is None (the normal case) the dirstate supplies parents
    and the changed-file lists; otherwise this acts as the rawcommit
    backend and *files* is taken verbatim.  Returns the new changelog
    node, or None when nothing changed or the edited message is empty.

    NOTE(review): *extra* uses a mutable {} default; it is copied
    immediately below so no state leaks between calls, but the idiom
    is fragile — confirm before changing.
    """

    commit = []
    remove = []
    changed = []
    use_dirstate = (p1 is None) # not rawcommit
    extra = extra.copy()

    # decide which files to commit and which to remove
    if use_dirstate:
        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            changes = self.status(match=match)[:5]
            modified, added, removed, deleted, unknown = changes
            commit = modified + added
            remove = removed
    else:
        commit = files

    if use_dirstate:
        p1, p2 = self.dirstate.parents()
        update_dirstate = True
    else:
        p1, p2 = p1, p2 or nullid
        update_dirstate = (self.dirstate.parents()[0] == p1)

    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0]).copy()
    m2 = self.manifest.read(c2[0])

    if use_dirstate:
        branchname = self.workingctx().branch()
        try:
            # round-trip to verify the branch name is valid UTF-8
            branchname = branchname.decode('UTF-8').encode('UTF-8')
        except UnicodeDecodeError:
            raise util.Abort(_('branch name not in UTF-8!'))
    else:
        branchname = ""

    if use_dirstate:
        oldname = c1[5].get("branch") # stored in UTF-8
        # a commit with no file changes is still allowed when it merges
        # a second parent or changes the branch name
        if not commit and not remove and not force and p2 == nullid and \
           branchname == oldname:
            self.ui.status(_("nothing changed\n"))
            return None

    xp1 = hex(p1)
    if p2 == nullid: xp2 = ''
    else: xp2 = hex(p2)

    self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

    if not wlock:
        wlock = self.wlock()
    if not lock:
        lock = self.lock()
    tr = self.transaction()

    # check in files
    new = {}
    linkrev = self.changelog.count()
    commit.sort()
    is_exec = util.execfunc(self.root, m1.execf)
    is_link = util.linkfunc(self.root, m1.linkf)
    for f in commit:
        self.ui.note(f + "\n")
        try:
            new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
            new_exec = is_exec(f)
            new_link = is_link(f)
            if not changed or changed[-1] != f:
                # mention the file in the changelog if some flag changed,
                # even if there was no content change.
                old_exec = m1.execf(f)
                old_link = m1.linkf(f)
                if old_exec != new_exec or old_link != new_link:
                    changed.append(f)
            m1.set(f, new_exec, new_link)
        except (OSError, IOError):
            if use_dirstate:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise
            else:
                remove.append(f)

    # update manifest
    m1.update(new)
    remove.sort()
    removed = []

    for f in remove:
        if f in m1:
            del m1[f]
            removed.append(f)
        elif f in m2:
            removed.append(f)
    mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

    # add changeset
    new = new.keys()
    new.sort()

    user = user or self.ui.username()
    if not text or force_editor:
        # build the commit-message template shown in the editor
        edittext = []
        if text:
            edittext.append(text)
        edittext.append("")
        edittext.append("HG: user: %s" % user)
        if p2 != nullid:
            edittext.append("HG: branch merge")
        if branchname:
            edittext.append("HG: branch %s" % util.tolocal(branchname))
        edittext.extend(["HG: changed %s" % f for f in changed])
        edittext.extend(["HG: removed %s" % f for f in removed])
        if not changed and not remove:
            edittext.append("HG: no files changed")
        edittext.append("")
        # run editor in the repository root
        olddir = os.getcwd()
        os.chdir(self.root)
        text = self.ui.edit("\n".join(edittext), user)
        os.chdir(olddir)

    # strip trailing whitespace and leading blank lines; an empty
    # message aborts the commit
    lines = [line.rstrip() for line in text.rstrip().splitlines()]
    while lines and not lines[0]:
        del lines[0]
    if not lines:
        return None
    text = '\n'.join(lines)
    if branchname:
        extra["branch"] = branchname
    n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                           user, date, extra)
    self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
              parent2=xp2)
    tr.close()

    if self.branchcache and "branch" in extra:
        self.branchcache[util.tolocal(extra["branch"])] = n

    if use_dirstate or update_dirstate:
        self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(removed)

    self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
    return n
859
868
def walk(self, node=None, files=[], match=util.always, badmatch=None):
    '''
    walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function

    results are yielded in a tuple (src, filename), where src
    is one of:
    'f' the file was found in the directory tree
    'm' the file was only in the dirstate and not in the tree
    'b' file was not found and matched badmatch

    NOTE(review): files uses a mutable [] default; it is never
    mutated here (only read / copied via dict.fromkeys), so this is
    harmless, but confirm before changing.
    '''

    if node:
        # walking a committed changeset: match against its manifest
        fdict = dict.fromkeys(files)
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fdict.pop('.', None)
        mdict = self.manifest.read(self.changelog.read(node)[0])
        mfiles = mdict.keys()
        mfiles.sort()
        for fn in mfiles:
            for ffn in fdict:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    # the break right after makes the mutation of
                    # fdict during iteration safe
                    del fdict[ffn]
                    break
            if match(fn):
                yield 'm', fn
        # anything left in fdict was requested but not in the manifest
        ffiles = fdict.keys()
        ffiles.sort()
        for fn in ffiles:
            if badmatch and badmatch(fn):
                if match(fn):
                    yield 'b', fn
            else:
                self.ui.warn(_('%s: No such file in rev %s\n')
                             % (self.pathto(fn), short(node)))
    else:
        # walking the working directory: delegate to the dirstate
        for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
            yield src, fn
901
910
def status(self, node1=None, node2=None, files=[], match=util.always,
           wlock=None, list_ignored=False, list_clean=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists:
    (modified, added, removed, deleted, unknown, ignored, clean);
    ignored and clean are only populated when the corresponding
    list_* flag is set.
    """

    def fcmp(fn, getnode):
        # compare working-directory content of fn against its stored
        # revision; truthy when they differ
        t1 = self.wread(fn)
        return self.file(fn).cmp(getnode(fn), t1)

    def mfmatches(node):
        # manifest of *node* restricted to files accepted by match()
        change = self.changelog.read(node)
        mf = self.manifest.read(change[0]).copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    modified, added, removed, deleted, unknown = [], [], [], [], []
    ignored, clean = [], []

    compareworking = False
    if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
        compareworking = True

    if not compareworking:
        # read the manifest from node1 before the manifest from node2,
        # so that we'll hit the manifest cache if we're going through
        # all the revisions in parent->child order.
        mf1 = mfmatches(node1)

    mywlock = False

    # are we comparing the working directory?
    if not node2:
        (lookup, modified, added, removed, deleted, unknown,
         ignored, clean) = self.dirstate.status(files, match,
                                                list_ignored, list_clean)

        # are we comparing working dir against its parent?
        if compareworking:
            if lookup:
                # do a full compare of any files that might have changed
                mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                      nullid)
                for f in lookup:
                    if fcmp(f, getnode):
                        modified.append(f)
                    else:
                        if list_clean:
                            clean.append(f)
                        # opportunistically grab the wlock (non-blocking)
                        # so the verified-clean state can be recorded
                        if not wlock and not mywlock:
                            mywlock = True
                            try:
                                wlock = self.wlock(wait=0)
                            except lock.LockException:
                                pass
                        if wlock:
                            self.dirstate.update([f], "n")
        else:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            # XXX: create it in dirstate.py ?
            mf2 = mfmatches(self.dirstate.parents()[0])
            is_exec = util.execfunc(self.root, mf2.execf)
            is_link = util.linkfunc(self.root, mf2.linkf)
            for f in lookup + modified + added:
                mf2[f] = ""
                mf2.set(f, is_exec(f), is_link(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]

        # release the wlock we acquired ourselves above
        if mywlock and wlock:
            wlock.release()
    else:
        # we are comparing two revisions
        mf2 = mfmatches(node2)

    if not compareworking:
        # flush lists from dirstate before comparing manifests
        modified, added, clean = [], [], []

        # make sure to sort the files so we talk to the disk in a
        # reasonable order
        mf2keys = mf2.keys()
        mf2keys.sort()
        getnode = lambda fn: mf1.get(fn, nullid)
        for fn in mf2keys:
            # NOTE(review): has_key is the historic (pre-"in") dict
            # membership test; equivalent to "fn in mf1" here
            if mf1.has_key(fn):
                if mf1.flags(fn) != mf2.flags(fn) or \
                   (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                            fcmp(fn, getnode))):
                    modified.append(fn)
                elif list_clean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)

        # whatever is left in mf1 existed in node1 but not node2
        removed = mf1.keys()

    # sort and return results:
    for l in modified, added, removed, deleted, unknown, ignored, clean:
        l.sort()
    return (modified, added, removed, deleted, unknown, ignored, clean)
1011
1020
1012 def add(self, list, wlock=None):
1021 def add(self, list, wlock=None):
1013 if not wlock:
1022 if not wlock:
1014 wlock = self.wlock()
1023 wlock = self.wlock()
1015 for f in list:
1024 for f in list:
1016 p = self.wjoin(f)
1025 p = self.wjoin(f)
1017 try:
1026 try:
1018 st = os.lstat(p)
1027 st = os.lstat(p)
1019 except:
1028 except:
1020 self.ui.warn(_("%s does not exist!\n") % f)
1029 self.ui.warn(_("%s does not exist!\n") % f)
1021 continue
1030 continue
1022 if st.st_size > 10000000:
1031 if st.st_size > 10000000:
1023 self.ui.warn(_("%s: files over 10MB may cause memory and"
1032 self.ui.warn(_("%s: files over 10MB may cause memory and"
1024 " performance problems\n"
1033 " performance problems\n"
1025 "(use 'hg revert %s' to unadd the file)\n")
1034 "(use 'hg revert %s' to unadd the file)\n")
1026 % (f, f))
1035 % (f, f))
1027 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1036 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1028 self.ui.warn(_("%s not added: only files and symlinks "
1037 self.ui.warn(_("%s not added: only files and symlinks "
1029 "supported currently\n") % f)
1038 "supported currently\n") % f)
1030 elif self.dirstate.state(f) in 'an':
1039 elif self.dirstate.state(f) in 'an':
1031 self.ui.warn(_("%s already tracked!\n") % f)
1040 self.ui.warn(_("%s already tracked!\n") % f)
1032 else:
1041 else:
1033 self.dirstate.update([f], "a")
1042 self.dirstate.update([f], "a")
1034
1043
1035 def forget(self, list, wlock=None):
1044 def forget(self, list, wlock=None):
1036 if not wlock:
1045 if not wlock:
1037 wlock = self.wlock()
1046 wlock = self.wlock()
1038 for f in list:
1047 for f in list:
1039 if self.dirstate.state(f) not in 'ai':
1048 if self.dirstate.state(f) not in 'ai':
1040 self.ui.warn(_("%s not added!\n") % f)
1049 self.ui.warn(_("%s not added!\n") % f)
1041 else:
1050 else:
1042 self.dirstate.forget([f])
1051 self.dirstate.forget([f])
1043
1052
1044 def remove(self, list, unlink=False, wlock=None):
1053 def remove(self, list, unlink=False, wlock=None):
1045 if unlink:
1054 if unlink:
1046 for f in list:
1055 for f in list:
1047 try:
1056 try:
1048 util.unlink(self.wjoin(f))
1057 util.unlink(self.wjoin(f))
1049 except OSError, inst:
1058 except OSError, inst:
1050 if inst.errno != errno.ENOENT:
1059 if inst.errno != errno.ENOENT:
1051 raise
1060 raise
1052 if not wlock:
1061 if not wlock:
1053 wlock = self.wlock()
1062 wlock = self.wlock()
1054 for f in list:
1063 for f in list:
1055 if unlink and os.path.exists(self.wjoin(f)):
1064 if unlink and os.path.exists(self.wjoin(f)):
1056 self.ui.warn(_("%s still exists!\n") % f)
1065 self.ui.warn(_("%s still exists!\n") % f)
1057 elif self.dirstate.state(f) == 'a':
1066 elif self.dirstate.state(f) == 'a':
1058 self.dirstate.forget([f])
1067 self.dirstate.forget([f])
1059 elif f not in self.dirstate:
1068 elif f not in self.dirstate:
1060 self.ui.warn(_("%s not tracked!\n") % f)
1069 self.ui.warn(_("%s not tracked!\n") % f)
1061 else:
1070 else:
1062 self.dirstate.update([f], "r")
1071 self.dirstate.update([f], "r")
1063
1072
1064 def undelete(self, list, wlock=None):
1073 def undelete(self, list, wlock=None):
1065 p = self.dirstate.parents()[0]
1074 p = self.dirstate.parents()[0]
1066 mn = self.changelog.read(p)[0]
1075 mn = self.changelog.read(p)[0]
1067 m = self.manifest.read(mn)
1076 m = self.manifest.read(mn)
1068 if not wlock:
1077 if not wlock:
1069 wlock = self.wlock()
1078 wlock = self.wlock()
1070 for f in list:
1079 for f in list:
1071 if self.dirstate.state(f) not in "r":
1080 if self.dirstate.state(f) not in "r":
1072 self.ui.warn("%s not removed!\n" % f)
1081 self.ui.warn("%s not removed!\n" % f)
1073 else:
1082 else:
1074 t = self.file(f).read(m[f])
1083 t = self.file(f).read(m[f])
1075 self.wwrite(f, t, m.flags(f))
1084 self.wwrite(f, t, m.flags(f))
1076 self.dirstate.update([f], "n")
1085 self.dirstate.update([f], "n")
1077
1086
1078 def copy(self, source, dest, wlock=None):
1087 def copy(self, source, dest, wlock=None):
1079 p = self.wjoin(dest)
1088 p = self.wjoin(dest)
1080 if not (os.path.exists(p) or os.path.islink(p)):
1089 if not (os.path.exists(p) or os.path.islink(p)):
1081 self.ui.warn(_("%s does not exist!\n") % dest)
1090 self.ui.warn(_("%s does not exist!\n") % dest)
1082 elif not (os.path.isfile(p) or os.path.islink(p)):
1091 elif not (os.path.isfile(p) or os.path.islink(p)):
1083 self.ui.warn(_("copy failed: %s is not a file or a "
1092 self.ui.warn(_("copy failed: %s is not a file or a "
1084 "symbolic link\n") % dest)
1093 "symbolic link\n") % dest)
1085 else:
1094 else:
1086 if not wlock:
1095 if not wlock:
1087 wlock = self.wlock()
1096 wlock = self.wlock()
1088 if self.dirstate.state(dest) == '?':
1097 if self.dirstate.state(dest) == '?':
1089 self.dirstate.update([dest], "a")
1098 self.dirstate.update([dest], "a")
1090 self.dirstate.copy(source, dest)
1099 self.dirstate.copy(source, dest)
1091
1100
1092 def heads(self, start=None):
1101 def heads(self, start=None):
1093 heads = self.changelog.heads(start)
1102 heads = self.changelog.heads(start)
1094 # sort the output in rev descending order
1103 # sort the output in rev descending order
1095 heads = [(-self.changelog.rev(h), h) for h in heads]
1104 heads = [(-self.changelog.rev(h), h) for h in heads]
1096 heads.sort()
1105 heads.sort()
1097 return [n for (r, n) in heads]
1106 return [n for (r, n) in heads]
1098
1107
1099 def branches(self, nodes):
1108 def branches(self, nodes):
1100 if not nodes:
1109 if not nodes:
1101 nodes = [self.changelog.tip()]
1110 nodes = [self.changelog.tip()]
1102 b = []
1111 b = []
1103 for n in nodes:
1112 for n in nodes:
1104 t = n
1113 t = n
1105 while 1:
1114 while 1:
1106 p = self.changelog.parents(n)
1115 p = self.changelog.parents(n)
1107 if p[1] != nullid or p[0] == nullid:
1116 if p[1] != nullid or p[0] == nullid:
1108 b.append((t, n, p[0], p[1]))
1117 b.append((t, n, p[0], p[1]))
1109 break
1118 break
1110 n = p[0]
1119 n = p[0]
1111 return b
1120 return b
1112
1121
1113 def between(self, pairs):
1122 def between(self, pairs):
1114 r = []
1123 r = []
1115
1124
1116 for top, bottom in pairs:
1125 for top, bottom in pairs:
1117 n, l, i = top, [], 0
1126 n, l, i = top, [], 0
1118 f = 1
1127 f = 1
1119
1128
1120 while n != bottom:
1129 while n != bottom:
1121 p = self.changelog.parents(n)[0]
1130 p = self.changelog.parents(n)[0]
1122 if i == f:
1131 if i == f:
1123 l.append(n)
1132 l.append(n)
1124 f = f * 2
1133 f = f * 2
1125 n = p
1134 n = p
1126 i += 1
1135 i += 1
1127
1136
1128 r.append(l)
1137 r.append(l)
1129
1138
1130 return r
1139 return r
1131
1140
1132 def findincoming(self, remote, base=None, heads=None, force=False):
1141 def findincoming(self, remote, base=None, heads=None, force=False):
1133 """Return list of roots of the subsets of missing nodes from remote
1142 """Return list of roots of the subsets of missing nodes from remote
1134
1143
1135 If base dict is specified, assume that these nodes and their parents
1144 If base dict is specified, assume that these nodes and their parents
1136 exist on the remote side and that no child of a node of base exists
1145 exist on the remote side and that no child of a node of base exists
1137 in both remote and self.
1146 in both remote and self.
1138 Furthermore base will be updated to include the nodes that exists
1147 Furthermore base will be updated to include the nodes that exists
1139 in self and remote but no children exists in self and remote.
1148 in self and remote but no children exists in self and remote.
1140 If a list of heads is specified, return only nodes which are heads
1149 If a list of heads is specified, return only nodes which are heads
1141 or ancestors of these heads.
1150 or ancestors of these heads.
1142
1151
1143 All the ancestors of base are in self and in remote.
1152 All the ancestors of base are in self and in remote.
1144 All the descendants of the list returned are missing in self.
1153 All the descendants of the list returned are missing in self.
1145 (and so we know that the rest of the nodes are missing in remote, see
1154 (and so we know that the rest of the nodes are missing in remote, see
1146 outgoing)
1155 outgoing)
1147 """
1156 """
1148 m = self.changelog.nodemap
1157 m = self.changelog.nodemap
1149 search = []
1158 search = []
1150 fetch = {}
1159 fetch = {}
1151 seen = {}
1160 seen = {}
1152 seenbranch = {}
1161 seenbranch = {}
1153 if base == None:
1162 if base == None:
1154 base = {}
1163 base = {}
1155
1164
1156 if not heads:
1165 if not heads:
1157 heads = remote.heads()
1166 heads = remote.heads()
1158
1167
1159 if self.changelog.tip() == nullid:
1168 if self.changelog.tip() == nullid:
1160 base[nullid] = 1
1169 base[nullid] = 1
1161 if heads != [nullid]:
1170 if heads != [nullid]:
1162 return [nullid]
1171 return [nullid]
1163 return []
1172 return []
1164
1173
1165 # assume we're closer to the tip than the root
1174 # assume we're closer to the tip than the root
1166 # and start by examining the heads
1175 # and start by examining the heads
1167 self.ui.status(_("searching for changes\n"))
1176 self.ui.status(_("searching for changes\n"))
1168
1177
1169 unknown = []
1178 unknown = []
1170 for h in heads:
1179 for h in heads:
1171 if h not in m:
1180 if h not in m:
1172 unknown.append(h)
1181 unknown.append(h)
1173 else:
1182 else:
1174 base[h] = 1
1183 base[h] = 1
1175
1184
1176 if not unknown:
1185 if not unknown:
1177 return []
1186 return []
1178
1187
1179 req = dict.fromkeys(unknown)
1188 req = dict.fromkeys(unknown)
1180 reqcnt = 0
1189 reqcnt = 0
1181
1190
1182 # search through remote branches
1191 # search through remote branches
1183 # a 'branch' here is a linear segment of history, with four parts:
1192 # a 'branch' here is a linear segment of history, with four parts:
1184 # head, root, first parent, second parent
1193 # head, root, first parent, second parent
1185 # (a branch always has two parents (or none) by definition)
1194 # (a branch always has two parents (or none) by definition)
1186 unknown = remote.branches(unknown)
1195 unknown = remote.branches(unknown)
1187 while unknown:
1196 while unknown:
1188 r = []
1197 r = []
1189 while unknown:
1198 while unknown:
1190 n = unknown.pop(0)
1199 n = unknown.pop(0)
1191 if n[0] in seen:
1200 if n[0] in seen:
1192 continue
1201 continue
1193
1202
1194 self.ui.debug(_("examining %s:%s\n")
1203 self.ui.debug(_("examining %s:%s\n")
1195 % (short(n[0]), short(n[1])))
1204 % (short(n[0]), short(n[1])))
1196 if n[0] == nullid: # found the end of the branch
1205 if n[0] == nullid: # found the end of the branch
1197 pass
1206 pass
1198 elif n in seenbranch:
1207 elif n in seenbranch:
1199 self.ui.debug(_("branch already found\n"))
1208 self.ui.debug(_("branch already found\n"))
1200 continue
1209 continue
1201 elif n[1] and n[1] in m: # do we know the base?
1210 elif n[1] and n[1] in m: # do we know the base?
1202 self.ui.debug(_("found incomplete branch %s:%s\n")
1211 self.ui.debug(_("found incomplete branch %s:%s\n")
1203 % (short(n[0]), short(n[1])))
1212 % (short(n[0]), short(n[1])))
1204 search.append(n) # schedule branch range for scanning
1213 search.append(n) # schedule branch range for scanning
1205 seenbranch[n] = 1
1214 seenbranch[n] = 1
1206 else:
1215 else:
1207 if n[1] not in seen and n[1] not in fetch:
1216 if n[1] not in seen and n[1] not in fetch:
1208 if n[2] in m and n[3] in m:
1217 if n[2] in m and n[3] in m:
1209 self.ui.debug(_("found new changeset %s\n") %
1218 self.ui.debug(_("found new changeset %s\n") %
1210 short(n[1]))
1219 short(n[1]))
1211 fetch[n[1]] = 1 # earliest unknown
1220 fetch[n[1]] = 1 # earliest unknown
1212 for p in n[2:4]:
1221 for p in n[2:4]:
1213 if p in m:
1222 if p in m:
1214 base[p] = 1 # latest known
1223 base[p] = 1 # latest known
1215
1224
1216 for p in n[2:4]:
1225 for p in n[2:4]:
1217 if p not in req and p not in m:
1226 if p not in req and p not in m:
1218 r.append(p)
1227 r.append(p)
1219 req[p] = 1
1228 req[p] = 1
1220 seen[n[0]] = 1
1229 seen[n[0]] = 1
1221
1230
1222 if r:
1231 if r:
1223 reqcnt += 1
1232 reqcnt += 1
1224 self.ui.debug(_("request %d: %s\n") %
1233 self.ui.debug(_("request %d: %s\n") %
1225 (reqcnt, " ".join(map(short, r))))
1234 (reqcnt, " ".join(map(short, r))))
1226 for p in xrange(0, len(r), 10):
1235 for p in xrange(0, len(r), 10):
1227 for b in remote.branches(r[p:p+10]):
1236 for b in remote.branches(r[p:p+10]):
1228 self.ui.debug(_("received %s:%s\n") %
1237 self.ui.debug(_("received %s:%s\n") %
1229 (short(b[0]), short(b[1])))
1238 (short(b[0]), short(b[1])))
1230 unknown.append(b)
1239 unknown.append(b)
1231
1240
1232 # do binary search on the branches we found
1241 # do binary search on the branches we found
1233 while search:
1242 while search:
1234 n = search.pop(0)
1243 n = search.pop(0)
1235 reqcnt += 1
1244 reqcnt += 1
1236 l = remote.between([(n[0], n[1])])[0]
1245 l = remote.between([(n[0], n[1])])[0]
1237 l.append(n[1])
1246 l.append(n[1])
1238 p = n[0]
1247 p = n[0]
1239 f = 1
1248 f = 1
1240 for i in l:
1249 for i in l:
1241 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1250 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1242 if i in m:
1251 if i in m:
1243 if f <= 2:
1252 if f <= 2:
1244 self.ui.debug(_("found new branch changeset %s\n") %
1253 self.ui.debug(_("found new branch changeset %s\n") %
1245 short(p))
1254 short(p))
1246 fetch[p] = 1
1255 fetch[p] = 1
1247 base[i] = 1
1256 base[i] = 1
1248 else:
1257 else:
1249 self.ui.debug(_("narrowed branch search to %s:%s\n")
1258 self.ui.debug(_("narrowed branch search to %s:%s\n")
1250 % (short(p), short(i)))
1259 % (short(p), short(i)))
1251 search.append((p, i))
1260 search.append((p, i))
1252 break
1261 break
1253 p, f = i, f * 2
1262 p, f = i, f * 2
1254
1263
1255 # sanity check our fetch list
1264 # sanity check our fetch list
1256 for f in fetch.keys():
1265 for f in fetch.keys():
1257 if f in m:
1266 if f in m:
1258 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1267 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1259
1268
1260 if base.keys() == [nullid]:
1269 if base.keys() == [nullid]:
1261 if force:
1270 if force:
1262 self.ui.warn(_("warning: repository is unrelated\n"))
1271 self.ui.warn(_("warning: repository is unrelated\n"))
1263 else:
1272 else:
1264 raise util.Abort(_("repository is unrelated"))
1273 raise util.Abort(_("repository is unrelated"))
1265
1274
1266 self.ui.debug(_("found new changesets starting at ") +
1275 self.ui.debug(_("found new changesets starting at ") +
1267 " ".join([short(f) for f in fetch]) + "\n")
1276 " ".join([short(f) for f in fetch]) + "\n")
1268
1277
1269 self.ui.debug(_("%d total queries\n") % reqcnt)
1278 self.ui.debug(_("%d total queries\n") % reqcnt)
1270
1279
1271 return fetch.keys()
1280 return fetch.keys()
1272
1281
1273 def findoutgoing(self, remote, base=None, heads=None, force=False):
1282 def findoutgoing(self, remote, base=None, heads=None, force=False):
1274 """Return list of nodes that are roots of subsets not in remote
1283 """Return list of nodes that are roots of subsets not in remote
1275
1284
1276 If base dict is specified, assume that these nodes and their parents
1285 If base dict is specified, assume that these nodes and their parents
1277 exist on the remote side.
1286 exist on the remote side.
1278 If a list of heads is specified, return only nodes which are heads
1287 If a list of heads is specified, return only nodes which are heads
1279 or ancestors of these heads, and return a second element which
1288 or ancestors of these heads, and return a second element which
1280 contains all remote heads which get new children.
1289 contains all remote heads which get new children.
1281 """
1290 """
1282 if base == None:
1291 if base == None:
1283 base = {}
1292 base = {}
1284 self.findincoming(remote, base, heads, force=force)
1293 self.findincoming(remote, base, heads, force=force)
1285
1294
1286 self.ui.debug(_("common changesets up to ")
1295 self.ui.debug(_("common changesets up to ")
1287 + " ".join(map(short, base.keys())) + "\n")
1296 + " ".join(map(short, base.keys())) + "\n")
1288
1297
1289 remain = dict.fromkeys(self.changelog.nodemap)
1298 remain = dict.fromkeys(self.changelog.nodemap)
1290
1299
1291 # prune everything remote has from the tree
1300 # prune everything remote has from the tree
1292 del remain[nullid]
1301 del remain[nullid]
1293 remove = base.keys()
1302 remove = base.keys()
1294 while remove:
1303 while remove:
1295 n = remove.pop(0)
1304 n = remove.pop(0)
1296 if n in remain:
1305 if n in remain:
1297 del remain[n]
1306 del remain[n]
1298 for p in self.changelog.parents(n):
1307 for p in self.changelog.parents(n):
1299 remove.append(p)
1308 remove.append(p)
1300
1309
1301 # find every node whose parents have been pruned
1310 # find every node whose parents have been pruned
1302 subset = []
1311 subset = []
1303 # find every remote head that will get new children
1312 # find every remote head that will get new children
1304 updated_heads = {}
1313 updated_heads = {}
1305 for n in remain:
1314 for n in remain:
1306 p1, p2 = self.changelog.parents(n)
1315 p1, p2 = self.changelog.parents(n)
1307 if p1 not in remain and p2 not in remain:
1316 if p1 not in remain and p2 not in remain:
1308 subset.append(n)
1317 subset.append(n)
1309 if heads:
1318 if heads:
1310 if p1 in heads:
1319 if p1 in heads:
1311 updated_heads[p1] = True
1320 updated_heads[p1] = True
1312 if p2 in heads:
1321 if p2 in heads:
1313 updated_heads[p2] = True
1322 updated_heads[p2] = True
1314
1323
1315 # this is the set of all roots we have to push
1324 # this is the set of all roots we have to push
1316 if heads:
1325 if heads:
1317 return subset, updated_heads.keys()
1326 return subset, updated_heads.keys()
1318 else:
1327 else:
1319 return subset
1328 return subset
1320
1329
1321 def pull(self, remote, heads=None, force=False, lock=None):
1330 def pull(self, remote, heads=None, force=False, lock=None):
1322 mylock = False
1331 mylock = False
1323 if not lock:
1332 if not lock:
1324 lock = self.lock()
1333 lock = self.lock()
1325 mylock = True
1334 mylock = True
1326
1335
1327 try:
1336 try:
1328 fetch = self.findincoming(remote, force=force)
1337 fetch = self.findincoming(remote, force=force)
1329 if fetch == [nullid]:
1338 if fetch == [nullid]:
1330 self.ui.status(_("requesting all changes\n"))
1339 self.ui.status(_("requesting all changes\n"))
1331
1340
1332 if not fetch:
1341 if not fetch:
1333 self.ui.status(_("no changes found\n"))
1342 self.ui.status(_("no changes found\n"))
1334 return 0
1343 return 0
1335
1344
1336 if heads is None:
1345 if heads is None:
1337 cg = remote.changegroup(fetch, 'pull')
1346 cg = remote.changegroup(fetch, 'pull')
1338 else:
1347 else:
1339 if 'changegroupsubset' not in remote.capabilities:
1348 if 'changegroupsubset' not in remote.capabilities:
1340 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1349 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1341 cg = remote.changegroupsubset(fetch, heads, 'pull')
1350 cg = remote.changegroupsubset(fetch, heads, 'pull')
1342 return self.addchangegroup(cg, 'pull', remote.url())
1351 return self.addchangegroup(cg, 'pull', remote.url())
1343 finally:
1352 finally:
1344 if mylock:
1353 if mylock:
1345 lock.release()
1354 lock.release()
1346
1355
1347 def push(self, remote, force=False, revs=None):
1356 def push(self, remote, force=False, revs=None):
1348 # there are two ways to push to remote repo:
1357 # there are two ways to push to remote repo:
1349 #
1358 #
1350 # addchangegroup assumes local user can lock remote
1359 # addchangegroup assumes local user can lock remote
1351 # repo (local filesystem, old ssh servers).
1360 # repo (local filesystem, old ssh servers).
1352 #
1361 #
1353 # unbundle assumes local user cannot lock remote repo (new ssh
1362 # unbundle assumes local user cannot lock remote repo (new ssh
1354 # servers, http servers).
1363 # servers, http servers).
1355
1364
1356 if remote.capable('unbundle'):
1365 if remote.capable('unbundle'):
1357 return self.push_unbundle(remote, force, revs)
1366 return self.push_unbundle(remote, force, revs)
1358 return self.push_addchangegroup(remote, force, revs)
1367 return self.push_addchangegroup(remote, force, revs)
1359
1368
1360 def prepush(self, remote, force, revs):
1369 def prepush(self, remote, force, revs):
1361 base = {}
1370 base = {}
1362 remote_heads = remote.heads()
1371 remote_heads = remote.heads()
1363 inc = self.findincoming(remote, base, remote_heads, force=force)
1372 inc = self.findincoming(remote, base, remote_heads, force=force)
1364
1373
1365 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1374 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1366 if revs is not None:
1375 if revs is not None:
1367 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1376 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1368 else:
1377 else:
1369 bases, heads = update, self.changelog.heads()
1378 bases, heads = update, self.changelog.heads()
1370
1379
1371 if not bases:
1380 if not bases:
1372 self.ui.status(_("no changes found\n"))
1381 self.ui.status(_("no changes found\n"))
1373 return None, 1
1382 return None, 1
1374 elif not force:
1383 elif not force:
1375 # check if we're creating new remote heads
1384 # check if we're creating new remote heads
1376 # to be a remote head after push, node must be either
1385 # to be a remote head after push, node must be either
1377 # - unknown locally
1386 # - unknown locally
1378 # - a local outgoing head descended from update
1387 # - a local outgoing head descended from update
1379 # - a remote head that's known locally and not
1388 # - a remote head that's known locally and not
1380 # ancestral to an outgoing head
1389 # ancestral to an outgoing head
1381
1390
1382 warn = 0
1391 warn = 0
1383
1392
1384 if remote_heads == [nullid]:
1393 if remote_heads == [nullid]:
1385 warn = 0
1394 warn = 0
1386 elif not revs and len(heads) > len(remote_heads):
1395 elif not revs and len(heads) > len(remote_heads):
1387 warn = 1
1396 warn = 1
1388 else:
1397 else:
1389 newheads = list(heads)
1398 newheads = list(heads)
1390 for r in remote_heads:
1399 for r in remote_heads:
1391 if r in self.changelog.nodemap:
1400 if r in self.changelog.nodemap:
1392 desc = self.changelog.heads(r, heads)
1401 desc = self.changelog.heads(r, heads)
1393 l = [h for h in heads if h in desc]
1402 l = [h for h in heads if h in desc]
1394 if not l:
1403 if not l:
1395 newheads.append(r)
1404 newheads.append(r)
1396 else:
1405 else:
1397 newheads.append(r)
1406 newheads.append(r)
1398 if len(newheads) > len(remote_heads):
1407 if len(newheads) > len(remote_heads):
1399 warn = 1
1408 warn = 1
1400
1409
1401 if warn:
1410 if warn:
1402 self.ui.warn(_("abort: push creates new remote branches!\n"))
1411 self.ui.warn(_("abort: push creates new remote branches!\n"))
1403 self.ui.status(_("(did you forget to merge?"
1412 self.ui.status(_("(did you forget to merge?"
1404 " use push -f to force)\n"))
1413 " use push -f to force)\n"))
1405 return None, 1
1414 return None, 1
1406 elif inc:
1415 elif inc:
1407 self.ui.warn(_("note: unsynced remote changes!\n"))
1416 self.ui.warn(_("note: unsynced remote changes!\n"))
1408
1417
1409
1418
1410 if revs is None:
1419 if revs is None:
1411 cg = self.changegroup(update, 'push')
1420 cg = self.changegroup(update, 'push')
1412 else:
1421 else:
1413 cg = self.changegroupsubset(update, revs, 'push')
1422 cg = self.changegroupsubset(update, revs, 'push')
1414 return cg, remote_heads
1423 return cg, remote_heads
1415
1424
1416 def push_addchangegroup(self, remote, force, revs):
1425 def push_addchangegroup(self, remote, force, revs):
1417 lock = remote.lock()
1426 lock = remote.lock()
1418
1427
1419 ret = self.prepush(remote, force, revs)
1428 ret = self.prepush(remote, force, revs)
1420 if ret[0] is not None:
1429 if ret[0] is not None:
1421 cg, remote_heads = ret
1430 cg, remote_heads = ret
1422 return remote.addchangegroup(cg, 'push', self.url())
1431 return remote.addchangegroup(cg, 'push', self.url())
1423 return ret[1]
1432 return ret[1]
1424
1433
1425 def push_unbundle(self, remote, force, revs):
1434 def push_unbundle(self, remote, force, revs):
1426 # local repo finds heads on server, finds out what revs it
1435 # local repo finds heads on server, finds out what revs it
1427 # must push. once revs transferred, if server finds it has
1436 # must push. once revs transferred, if server finds it has
1428 # different heads (someone else won commit/push race), server
1437 # different heads (someone else won commit/push race), server
1429 # aborts.
1438 # aborts.
1430
1439
1431 ret = self.prepush(remote, force, revs)
1440 ret = self.prepush(remote, force, revs)
1432 if ret[0] is not None:
1441 if ret[0] is not None:
1433 cg, remote_heads = ret
1442 cg, remote_heads = ret
1434 if force: remote_heads = ['force']
1443 if force: remote_heads = ['force']
1435 return remote.unbundle(cg, remote_heads, 'push')
1444 return remote.unbundle(cg, remote_heads, 'push')
1436 return ret[1]
1445 return ret[1]
1437
1446
1438 def changegroupinfo(self, nodes):
1447 def changegroupinfo(self, nodes):
1439 self.ui.note(_("%d changesets found\n") % len(nodes))
1448 self.ui.note(_("%d changesets found\n") % len(nodes))
1440 if self.ui.debugflag:
1449 if self.ui.debugflag:
1441 self.ui.debug(_("List of changesets:\n"))
1450 self.ui.debug(_("List of changesets:\n"))
1442 for node in nodes:
1451 for node in nodes:
1443 self.ui.debug("%s\n" % hex(node))
1452 self.ui.debug("%s\n" % hex(node))
1444
1453
1445 def changegroupsubset(self, bases, heads, source):
1454 def changegroupsubset(self, bases, heads, source):
1446 """This function generates a changegroup consisting of all the nodes
1455 """This function generates a changegroup consisting of all the nodes
1447 that are descendents of any of the bases, and ancestors of any of
1456 that are descendents of any of the bases, and ancestors of any of
1448 the heads.
1457 the heads.
1449
1458
1450 It is fairly complex as determining which filenodes and which
1459 It is fairly complex as determining which filenodes and which
1451 manifest nodes need to be included for the changeset to be complete
1460 manifest nodes need to be included for the changeset to be complete
1452 is non-trivial.
1461 is non-trivial.
1453
1462
1454 Another wrinkle is doing the reverse, figuring out which changeset in
1463 Another wrinkle is doing the reverse, figuring out which changeset in
1455 the changegroup a particular filenode or manifestnode belongs to."""
1464 the changegroup a particular filenode or manifestnode belongs to."""
1456
1465
1457 self.hook('preoutgoing', throw=True, source=source)
1466 self.hook('preoutgoing', throw=True, source=source)
1458
1467
1459 # Set up some initial variables
1468 # Set up some initial variables
1460 # Make it easy to refer to self.changelog
1469 # Make it easy to refer to self.changelog
1461 cl = self.changelog
1470 cl = self.changelog
1462 # msng is short for missing - compute the list of changesets in this
1471 # msng is short for missing - compute the list of changesets in this
1463 # changegroup.
1472 # changegroup.
1464 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1473 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1465 self.changegroupinfo(msng_cl_lst)
1474 self.changegroupinfo(msng_cl_lst)
1466 # Some bases may turn out to be superfluous, and some heads may be
1475 # Some bases may turn out to be superfluous, and some heads may be
1467 # too. nodesbetween will return the minimal set of bases and heads
1476 # too. nodesbetween will return the minimal set of bases and heads
1468 # necessary to re-create the changegroup.
1477 # necessary to re-create the changegroup.
1469
1478
1470 # Known heads are the list of heads that it is assumed the recipient
1479 # Known heads are the list of heads that it is assumed the recipient
1471 # of this changegroup will know about.
1480 # of this changegroup will know about.
1472 knownheads = {}
1481 knownheads = {}
1473 # We assume that all parents of bases are known heads.
1482 # We assume that all parents of bases are known heads.
1474 for n in bases:
1483 for n in bases:
1475 for p in cl.parents(n):
1484 for p in cl.parents(n):
1476 if p != nullid:
1485 if p != nullid:
1477 knownheads[p] = 1
1486 knownheads[p] = 1
1478 knownheads = knownheads.keys()
1487 knownheads = knownheads.keys()
1479 if knownheads:
1488 if knownheads:
1480 # Now that we know what heads are known, we can compute which
1489 # Now that we know what heads are known, we can compute which
1481 # changesets are known. The recipient must know about all
1490 # changesets are known. The recipient must know about all
1482 # changesets required to reach the known heads from the null
1491 # changesets required to reach the known heads from the null
1483 # changeset.
1492 # changeset.
1484 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1493 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1485 junk = None
1494 junk = None
1486 # Transform the list into an ersatz set.
1495 # Transform the list into an ersatz set.
1487 has_cl_set = dict.fromkeys(has_cl_set)
1496 has_cl_set = dict.fromkeys(has_cl_set)
1488 else:
1497 else:
1489 # If there were no known heads, the recipient cannot be assumed to
1498 # If there were no known heads, the recipient cannot be assumed to
1490 # know about any changesets.
1499 # know about any changesets.
1491 has_cl_set = {}
1500 has_cl_set = {}
1492
1501
1493 # Make it easy to refer to self.manifest
1502 # Make it easy to refer to self.manifest
1494 mnfst = self.manifest
1503 mnfst = self.manifest
1495 # We don't know which manifests are missing yet
1504 # We don't know which manifests are missing yet
1496 msng_mnfst_set = {}
1505 msng_mnfst_set = {}
1497 # Nor do we know which filenodes are missing.
1506 # Nor do we know which filenodes are missing.
1498 msng_filenode_set = {}
1507 msng_filenode_set = {}
1499
1508
1500 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1509 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1501 junk = None
1510 junk = None
1502
1511
1503 # A changeset always belongs to itself, so the changenode lookup
1512 # A changeset always belongs to itself, so the changenode lookup
1504 # function for a changenode is identity.
1513 # function for a changenode is identity.
1505 def identity(x):
1514 def identity(x):
1506 return x
1515 return x
1507
1516
1508 # A function generating function. Sets up an environment for the
1517 # A function generating function. Sets up an environment for the
1509 # inner function.
1518 # inner function.
1510 def cmp_by_rev_func(revlog):
1519 def cmp_by_rev_func(revlog):
1511 # Compare two nodes by their revision number in the environment's
1520 # Compare two nodes by their revision number in the environment's
1512 # revision history. Since the revision number both represents the
1521 # revision history. Since the revision number both represents the
1513 # most efficient order to read the nodes in, and represents a
1522 # most efficient order to read the nodes in, and represents a
1514 # topological sorting of the nodes, this function is often useful.
1523 # topological sorting of the nodes, this function is often useful.
1515 def cmp_by_rev(a, b):
1524 def cmp_by_rev(a, b):
1516 return cmp(revlog.rev(a), revlog.rev(b))
1525 return cmp(revlog.rev(a), revlog.rev(b))
1517 return cmp_by_rev
1526 return cmp_by_rev
1518
1527
1519 # If we determine that a particular file or manifest node must be a
1528 # If we determine that a particular file or manifest node must be a
1520 # node that the recipient of the changegroup will already have, we can
1529 # node that the recipient of the changegroup will already have, we can
1521 # also assume the recipient will have all the parents. This function
1530 # also assume the recipient will have all the parents. This function
1522 # prunes them from the set of missing nodes.
1531 # prunes them from the set of missing nodes.
1523 def prune_parents(revlog, hasset, msngset):
1532 def prune_parents(revlog, hasset, msngset):
1524 haslst = hasset.keys()
1533 haslst = hasset.keys()
1525 haslst.sort(cmp_by_rev_func(revlog))
1534 haslst.sort(cmp_by_rev_func(revlog))
1526 for node in haslst:
1535 for node in haslst:
1527 parentlst = [p for p in revlog.parents(node) if p != nullid]
1536 parentlst = [p for p in revlog.parents(node) if p != nullid]
1528 while parentlst:
1537 while parentlst:
1529 n = parentlst.pop()
1538 n = parentlst.pop()
1530 if n not in hasset:
1539 if n not in hasset:
1531 hasset[n] = 1
1540 hasset[n] = 1
1532 p = [p for p in revlog.parents(n) if p != nullid]
1541 p = [p for p in revlog.parents(n) if p != nullid]
1533 parentlst.extend(p)
1542 parentlst.extend(p)
1534 for n in hasset:
1543 for n in hasset:
1535 msngset.pop(n, None)
1544 msngset.pop(n, None)
1536
1545
1537 # This is a function generating function used to set up an environment
1546 # This is a function generating function used to set up an environment
1538 # for the inner function to execute in.
1547 # for the inner function to execute in.
1539 def manifest_and_file_collector(changedfileset):
1548 def manifest_and_file_collector(changedfileset):
1540 # This is an information gathering function that gathers
1549 # This is an information gathering function that gathers
1541 # information from each changeset node that goes out as part of
1550 # information from each changeset node that goes out as part of
1542 # the changegroup. The information gathered is a list of which
1551 # the changegroup. The information gathered is a list of which
1543 # manifest nodes are potentially required (the recipient may
1552 # manifest nodes are potentially required (the recipient may
1544 # already have them) and total list of all files which were
1553 # already have them) and total list of all files which were
1545 # changed in any changeset in the changegroup.
1554 # changed in any changeset in the changegroup.
1546 #
1555 #
1547 # We also remember the first changenode we saw any manifest
1556 # We also remember the first changenode we saw any manifest
1548 # referenced by so we can later determine which changenode 'owns'
1557 # referenced by so we can later determine which changenode 'owns'
1549 # the manifest.
1558 # the manifest.
1550 def collect_manifests_and_files(clnode):
1559 def collect_manifests_and_files(clnode):
1551 c = cl.read(clnode)
1560 c = cl.read(clnode)
1552 for f in c[3]:
1561 for f in c[3]:
1553 # This is to make sure we only have one instance of each
1562 # This is to make sure we only have one instance of each
1554 # filename string for each filename.
1563 # filename string for each filename.
1555 changedfileset.setdefault(f, f)
1564 changedfileset.setdefault(f, f)
1556 msng_mnfst_set.setdefault(c[0], clnode)
1565 msng_mnfst_set.setdefault(c[0], clnode)
1557 return collect_manifests_and_files
1566 return collect_manifests_and_files
1558
1567
1559 # Figure out which manifest nodes (of the ones we think might be part
1568 # Figure out which manifest nodes (of the ones we think might be part
1560 # of the changegroup) the recipient must know about and remove them
1569 # of the changegroup) the recipient must know about and remove them
1561 # from the changegroup.
1570 # from the changegroup.
1562 def prune_manifests():
1571 def prune_manifests():
1563 has_mnfst_set = {}
1572 has_mnfst_set = {}
1564 for n in msng_mnfst_set:
1573 for n in msng_mnfst_set:
1565 # If a 'missing' manifest thinks it belongs to a changenode
1574 # If a 'missing' manifest thinks it belongs to a changenode
1566 # the recipient is assumed to have, obviously the recipient
1575 # the recipient is assumed to have, obviously the recipient
1567 # must have that manifest.
1576 # must have that manifest.
1568 linknode = cl.node(mnfst.linkrev(n))
1577 linknode = cl.node(mnfst.linkrev(n))
1569 if linknode in has_cl_set:
1578 if linknode in has_cl_set:
1570 has_mnfst_set[n] = 1
1579 has_mnfst_set[n] = 1
1571 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1580 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1572
1581
1573 # Use the information collected in collect_manifests_and_files to say
1582 # Use the information collected in collect_manifests_and_files to say
1574 # which changenode any manifestnode belongs to.
1583 # which changenode any manifestnode belongs to.
1575 def lookup_manifest_link(mnfstnode):
1584 def lookup_manifest_link(mnfstnode):
1576 return msng_mnfst_set[mnfstnode]
1585 return msng_mnfst_set[mnfstnode]
1577
1586
1578 # A function generating function that sets up the initial environment
1587 # A function generating function that sets up the initial environment
1579 # the inner function.
1588 # the inner function.
1580 def filenode_collector(changedfiles):
1589 def filenode_collector(changedfiles):
1581 next_rev = [0]
1590 next_rev = [0]
1582 # This gathers information from each manifestnode included in the
1591 # This gathers information from each manifestnode included in the
1583 # changegroup about which filenodes the manifest node references
1592 # changegroup about which filenodes the manifest node references
1584 # so we can include those in the changegroup too.
1593 # so we can include those in the changegroup too.
1585 #
1594 #
1586 # It also remembers which changenode each filenode belongs to. It
1595 # It also remembers which changenode each filenode belongs to. It
1587 # does this by assuming the a filenode belongs to the changenode
1596 # does this by assuming the a filenode belongs to the changenode
1588 # the first manifest that references it belongs to.
1597 # the first manifest that references it belongs to.
1589 def collect_msng_filenodes(mnfstnode):
1598 def collect_msng_filenodes(mnfstnode):
1590 r = mnfst.rev(mnfstnode)
1599 r = mnfst.rev(mnfstnode)
1591 if r == next_rev[0]:
1600 if r == next_rev[0]:
1592 # If the last rev we looked at was the one just previous,
1601 # If the last rev we looked at was the one just previous,
1593 # we only need to see a diff.
1602 # we only need to see a diff.
1594 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1603 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1595 # For each line in the delta
1604 # For each line in the delta
1596 for dline in delta.splitlines():
1605 for dline in delta.splitlines():
1597 # get the filename and filenode for that line
1606 # get the filename and filenode for that line
1598 f, fnode = dline.split('\0')
1607 f, fnode = dline.split('\0')
1599 fnode = bin(fnode[:40])
1608 fnode = bin(fnode[:40])
1600 f = changedfiles.get(f, None)
1609 f = changedfiles.get(f, None)
1601 # And if the file is in the list of files we care
1610 # And if the file is in the list of files we care
1602 # about.
1611 # about.
1603 if f is not None:
1612 if f is not None:
1604 # Get the changenode this manifest belongs to
1613 # Get the changenode this manifest belongs to
1605 clnode = msng_mnfst_set[mnfstnode]
1614 clnode = msng_mnfst_set[mnfstnode]
1606 # Create the set of filenodes for the file if
1615 # Create the set of filenodes for the file if
1607 # there isn't one already.
1616 # there isn't one already.
1608 ndset = msng_filenode_set.setdefault(f, {})
1617 ndset = msng_filenode_set.setdefault(f, {})
1609 # And set the filenode's changelog node to the
1618 # And set the filenode's changelog node to the
1610 # manifest's if it hasn't been set already.
1619 # manifest's if it hasn't been set already.
1611 ndset.setdefault(fnode, clnode)
1620 ndset.setdefault(fnode, clnode)
1612 else:
1621 else:
1613 # Otherwise we need a full manifest.
1622 # Otherwise we need a full manifest.
1614 m = mnfst.read(mnfstnode)
1623 m = mnfst.read(mnfstnode)
1615 # For every file in we care about.
1624 # For every file in we care about.
1616 for f in changedfiles:
1625 for f in changedfiles:
1617 fnode = m.get(f, None)
1626 fnode = m.get(f, None)
1618 # If it's in the manifest
1627 # If it's in the manifest
1619 if fnode is not None:
1628 if fnode is not None:
1620 # See comments above.
1629 # See comments above.
1621 clnode = msng_mnfst_set[mnfstnode]
1630 clnode = msng_mnfst_set[mnfstnode]
1622 ndset = msng_filenode_set.setdefault(f, {})
1631 ndset = msng_filenode_set.setdefault(f, {})
1623 ndset.setdefault(fnode, clnode)
1632 ndset.setdefault(fnode, clnode)
1624 # Remember the revision we hope to see next.
1633 # Remember the revision we hope to see next.
1625 next_rev[0] = r + 1
1634 next_rev[0] = r + 1
1626 return collect_msng_filenodes
1635 return collect_msng_filenodes
1627
1636
1628 # We have a list of filenodes we think we need for a file, lets remove
1637 # We have a list of filenodes we think we need for a file, lets remove
1629 # all those we now the recipient must have.
1638 # all those we now the recipient must have.
1630 def prune_filenodes(f, filerevlog):
1639 def prune_filenodes(f, filerevlog):
1631 msngset = msng_filenode_set[f]
1640 msngset = msng_filenode_set[f]
1632 hasset = {}
1641 hasset = {}
1633 # If a 'missing' filenode thinks it belongs to a changenode we
1642 # If a 'missing' filenode thinks it belongs to a changenode we
1634 # assume the recipient must have, then the recipient must have
1643 # assume the recipient must have, then the recipient must have
1635 # that filenode.
1644 # that filenode.
1636 for n in msngset:
1645 for n in msngset:
1637 clnode = cl.node(filerevlog.linkrev(n))
1646 clnode = cl.node(filerevlog.linkrev(n))
1638 if clnode in has_cl_set:
1647 if clnode in has_cl_set:
1639 hasset[n] = 1
1648 hasset[n] = 1
1640 prune_parents(filerevlog, hasset, msngset)
1649 prune_parents(filerevlog, hasset, msngset)
1641
1650
1642 # A function generator function that sets up the a context for the
1651 # A function generator function that sets up the a context for the
1643 # inner function.
1652 # inner function.
1644 def lookup_filenode_link_func(fname):
1653 def lookup_filenode_link_func(fname):
1645 msngset = msng_filenode_set[fname]
1654 msngset = msng_filenode_set[fname]
1646 # Lookup the changenode the filenode belongs to.
1655 # Lookup the changenode the filenode belongs to.
1647 def lookup_filenode_link(fnode):
1656 def lookup_filenode_link(fnode):
1648 return msngset[fnode]
1657 return msngset[fnode]
1649 return lookup_filenode_link
1658 return lookup_filenode_link
1650
1659
1651 # Now that we have all theses utility functions to help out and
1660 # Now that we have all theses utility functions to help out and
1652 # logically divide up the task, generate the group.
1661 # logically divide up the task, generate the group.
1653 def gengroup():
1662 def gengroup():
1654 # The set of changed files starts empty.
1663 # The set of changed files starts empty.
1655 changedfiles = {}
1664 changedfiles = {}
1656 # Create a changenode group generator that will call our functions
1665 # Create a changenode group generator that will call our functions
1657 # back to lookup the owning changenode and collect information.
1666 # back to lookup the owning changenode and collect information.
1658 group = cl.group(msng_cl_lst, identity,
1667 group = cl.group(msng_cl_lst, identity,
1659 manifest_and_file_collector(changedfiles))
1668 manifest_and_file_collector(changedfiles))
1660 for chnk in group:
1669 for chnk in group:
1661 yield chnk
1670 yield chnk
1662
1671
1663 # The list of manifests has been collected by the generator
1672 # The list of manifests has been collected by the generator
1664 # calling our functions back.
1673 # calling our functions back.
1665 prune_manifests()
1674 prune_manifests()
1666 msng_mnfst_lst = msng_mnfst_set.keys()
1675 msng_mnfst_lst = msng_mnfst_set.keys()
1667 # Sort the manifestnodes by revision number.
1676 # Sort the manifestnodes by revision number.
1668 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1677 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1669 # Create a generator for the manifestnodes that calls our lookup
1678 # Create a generator for the manifestnodes that calls our lookup
1670 # and data collection functions back.
1679 # and data collection functions back.
1671 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1680 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1672 filenode_collector(changedfiles))
1681 filenode_collector(changedfiles))
1673 for chnk in group:
1682 for chnk in group:
1674 yield chnk
1683 yield chnk
1675
1684
1676 # These are no longer needed, dereference and toss the memory for
1685 # These are no longer needed, dereference and toss the memory for
1677 # them.
1686 # them.
1678 msng_mnfst_lst = None
1687 msng_mnfst_lst = None
1679 msng_mnfst_set.clear()
1688 msng_mnfst_set.clear()
1680
1689
1681 changedfiles = changedfiles.keys()
1690 changedfiles = changedfiles.keys()
1682 changedfiles.sort()
1691 changedfiles.sort()
1683 # Go through all our files in order sorted by name.
1692 # Go through all our files in order sorted by name.
1684 for fname in changedfiles:
1693 for fname in changedfiles:
1685 filerevlog = self.file(fname)
1694 filerevlog = self.file(fname)
1686 # Toss out the filenodes that the recipient isn't really
1695 # Toss out the filenodes that the recipient isn't really
1687 # missing.
1696 # missing.
1688 if msng_filenode_set.has_key(fname):
1697 if msng_filenode_set.has_key(fname):
1689 prune_filenodes(fname, filerevlog)
1698 prune_filenodes(fname, filerevlog)
1690 msng_filenode_lst = msng_filenode_set[fname].keys()
1699 msng_filenode_lst = msng_filenode_set[fname].keys()
1691 else:
1700 else:
1692 msng_filenode_lst = []
1701 msng_filenode_lst = []
1693 # If any filenodes are left, generate the group for them,
1702 # If any filenodes are left, generate the group for them,
1694 # otherwise don't bother.
1703 # otherwise don't bother.
1695 if len(msng_filenode_lst) > 0:
1704 if len(msng_filenode_lst) > 0:
1696 yield changegroup.genchunk(fname)
1705 yield changegroup.genchunk(fname)
1697 # Sort the filenodes by their revision #
1706 # Sort the filenodes by their revision #
1698 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1707 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1699 # Create a group generator and only pass in a changenode
1708 # Create a group generator and only pass in a changenode
1700 # lookup function as we need to collect no information
1709 # lookup function as we need to collect no information
1701 # from filenodes.
1710 # from filenodes.
1702 group = filerevlog.group(msng_filenode_lst,
1711 group = filerevlog.group(msng_filenode_lst,
1703 lookup_filenode_link_func(fname))
1712 lookup_filenode_link_func(fname))
1704 for chnk in group:
1713 for chnk in group:
1705 yield chnk
1714 yield chnk
1706 if msng_filenode_set.has_key(fname):
1715 if msng_filenode_set.has_key(fname):
1707 # Don't need this anymore, toss it to free memory.
1716 # Don't need this anymore, toss it to free memory.
1708 del msng_filenode_set[fname]
1717 del msng_filenode_set[fname]
1709 # Signal that no more groups are left.
1718 # Signal that no more groups are left.
1710 yield changegroup.closechunk()
1719 yield changegroup.closechunk()
1711
1720
1712 if msng_cl_lst:
1721 if msng_cl_lst:
1713 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1722 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1714
1723
1715 return util.chunkbuffer(gengroup())
1724 return util.chunkbuffer(gengroup())
1716
1725
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them."""

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    nodes = cl.nodesbetween(basenodes, None)[0]
    # Set of outgoing changelog revision numbers, used for fast
    # membership tests below.
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes)

    def identity(x):
        # A changenode's link node is itself.
        return x

    def gennodelst(revlog):
        # Yield every node of *revlog* whose linked changeset is outgoing.
        for rev in xrange(0, revlog.count()):
            node = revlog.node(rev)
            if revlog.linkrev(node) in revset:
                yield node

    def changed_file_collector(changedfileset):
        # Record every filename touched by an outgoing changeset.
        def collect_changed_files(clnode):
            for fname in cl.read(clnode)[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Map a node of *revlog* back to its owning changenode.
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # Accumulate the full set of changed files while streaming the
        # changelog chunks out.
        changedfiles = {}

        for chunk in cl.group(nodes, identity,
                              changed_file_collector(changedfiles)):
            yield chunk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        # Stream the manifest chunks.
        mnfst = self.manifest
        for chunk in mnfst.group(gennodelst(mnfst),
                                 lookuprevlink_func(mnfst)):
            yield chunk

        # Stream one group per changed file, sorted by name.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = list(gennodelst(filerevlog))
            if nodeiter:
                yield changegroup.genchunk(fname)
                lookup = lookuprevlink_func(filerevlog)
                for chunk in filerevlog.group(nodeiter, lookup):
                    yield chunk

        # Signal that no more groups are left.
        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1783
1792
def addchangegroup(self, source, srctype, url):
    """add changegroup to repo.

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        # Progress callback for the changelog: report the node being
        # added and return the rev number it will get.
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    def revmap(x):
        # Map a changelog node to its revision number.
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    tr = self.transaction()

    # write changelog data to temp files so concurrent readers will not
    # see an inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    # pull off the changeset group
    self.ui.status(_("adding changesets\n"))
    startrev = cl.count() - 1
    chunkiter = changegroup.chunkiter(source)
    if cl.addgroup(chunkiter, csmap, tr, 1) is None:
        raise util.Abort(_("received changelog group is empty"))
    endrev = cl.count() - 1
    changesets = endrev - startrev

    # pull off the manifest group
    self.ui.status(_("adding manifests\n"))
    chunkiter = changegroup.chunkiter(source)
    # no need to check for empty manifest group here:
    # if the result of the merge of 1 and 2 is the same in 3 and 4,
    # no new manifest will be created and the manifest group will
    # be empty during the pull
    self.manifest.addgroup(chunkiter, revmap, tr)

    # process the files
    self.ui.status(_("adding file changes\n"))
    while True:
        fname = changegroup.getchunk(source)
        if not fname:
            break
        self.ui.debug(_("adding %s revisions\n") % fname)
        flog = self.file(fname)
        oldcount = flog.count()
        chunkiter = changegroup.chunkiter(source)
        if flog.addgroup(chunkiter, revmap, tr) is None:
            raise util.Abort(_("received file revlog group is empty"))
        revisions += flog.count() - oldcount
        files += 1

    # make changelog see real files again
    cl.finalize(tr)

    newheads = len(self.changelog.heads())
    headsmsg = ""
    if oldheads and newheads != oldheads:
        headsmsg = _(" (%+d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, headsmsg))

    if changesets > 0:
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(startrev + 1)),
                  source=srctype, url=url)

    tr.close()

    if changesets > 0:
        self.hook("changegroup",
                  node=hex(self.changelog.node(startrev + 1)),
                  source=srctype, url=url)

        for rev in xrange(startrev + 1, endrev + 1):
            self.hook("incoming", node=hex(self.changelog.node(rev)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1880
1889
1881
1890
def stream_in(self, remote):
    """Clone by copying raw store files streamed from *remote*.

    Protocol: one response-code line, one "<files> <bytes>" header line,
    then for each file a "<name>\\0<size>" header followed by exactly
    <size> bytes of file data.

    Raises util.Abort on a server-reported error and
    util.UnexpectedOutput on a malformed response.  Returns
    len(self.heads()) + 1 so the result is never 0 on success.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # BUG FIX: this read "except ValueError, TypeError:", which (in
    # Python 2) catches ONLY ValueError and binds it to the name
    # TypeError, shadowing the builtin.  Catching both exception types
    # requires a parenthesized tuple.
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):  # same tuple fix as above
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        # copy the announced number of bytes straight into the store
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # guard against division by zero on very fast transfers
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.reload()
    return len(self.heads()) + 1
1928
1937
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # FIX: the default used to be a shared mutable list (heads=[]),
    # which was also passed through to self.pull.  Normalizing None to
    # a fresh list keeps the old behavior for every caller while
    # avoiding the mutable-default pitfall.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
1947
1956
1948 # used to avoid circular references so destructors work
1957 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a closure that renames each queued (src, dest) pair.

    The pairs are snapshotted into fresh tuples up front so the closure
    holds no references back into the caller's structures.
    """
    renames = [tuple(item) for item in files]

    def do_renames():
        for src, dest in renames:
            util.rename(src, dest)
    return do_renames
1955
1964
def instance(ui, path, create):
    """Open (or create) the local repository at *path*.

    Strips a leading 'file:' scheme before handing the path to
    localrepository.
    """
    repo_path = util.drop_scheme('file', path)
    return localrepository(ui, repo_path, create)
1958
1967
def islocal(path):
    """Repositories reached through this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now