##// END OF EJS Templates
localrepo.status: fcmp gets a getnode function instead of the manifest
Alexis S. L. Carvalho -
r4160:b4bd2f3e default
parent child Browse files
Show More
@@ -1,1926 +1,1929 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, appendfile, changegroup
10 import repo, appendfile, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("There is no Mercurial repository"
29 raise repo.RepoError(_("There is no Mercurial repository"
30 " here (.hg not found)"))
30 " here (.hg not found)"))
31 path = p
31 path = p
32
32
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34 self.root = os.path.realpath(path)
34 self.root = os.path.realpath(path)
35 self.origroot = path
35 self.origroot = path
36 self.opener = util.opener(self.path)
36 self.opener = util.opener(self.path)
37 self.wopener = util.opener(self.root)
37 self.wopener = util.opener(self.root)
38
38
39 if not os.path.isdir(self.path):
39 if not os.path.isdir(self.path):
40 if create:
40 if create:
41 if not os.path.exists(path):
41 if not os.path.exists(path):
42 os.mkdir(path)
42 os.mkdir(path)
43 os.mkdir(self.path)
43 os.mkdir(self.path)
44 os.mkdir(os.path.join(self.path, "store"))
44 os.mkdir(os.path.join(self.path, "store"))
45 requirements = ("revlogv1", "store")
45 requirements = ("revlogv1", "store")
46 reqfile = self.opener("requires", "w")
46 reqfile = self.opener("requires", "w")
47 for r in requirements:
47 for r in requirements:
48 reqfile.write("%s\n" % r)
48 reqfile.write("%s\n" % r)
49 reqfile.close()
49 reqfile.close()
50 # create an invalid changelog
50 # create an invalid changelog
51 self.opener("00changelog.i", "a").write(
51 self.opener("00changelog.i", "a").write(
52 '\0\0\0\2' # represents revlogv2
52 '\0\0\0\2' # represents revlogv2
53 ' dummy changelog to prevent using the old repo layout'
53 ' dummy changelog to prevent using the old repo layout'
54 )
54 )
55 else:
55 else:
56 raise repo.RepoError(_("repository %s not found") % path)
56 raise repo.RepoError(_("repository %s not found") % path)
57 elif create:
57 elif create:
58 raise repo.RepoError(_("repository %s already exists") % path)
58 raise repo.RepoError(_("repository %s already exists") % path)
59 else:
59 else:
60 # find requirements
60 # find requirements
61 try:
61 try:
62 requirements = self.opener("requires").read().splitlines()
62 requirements = self.opener("requires").read().splitlines()
63 except IOError, inst:
63 except IOError, inst:
64 if inst.errno != errno.ENOENT:
64 if inst.errno != errno.ENOENT:
65 raise
65 raise
66 requirements = []
66 requirements = []
67 # check them
67 # check them
68 for r in requirements:
68 for r in requirements:
69 if r not in self.supported:
69 if r not in self.supported:
70 raise repo.RepoError(_("requirement '%s' not supported") % r)
70 raise repo.RepoError(_("requirement '%s' not supported") % r)
71
71
72 # setup store
72 # setup store
73 if "store" in requirements:
73 if "store" in requirements:
74 self.encodefn = util.encodefilename
74 self.encodefn = util.encodefilename
75 self.decodefn = util.decodefilename
75 self.decodefn = util.decodefilename
76 self.spath = os.path.join(self.path, "store")
76 self.spath = os.path.join(self.path, "store")
77 else:
77 else:
78 self.encodefn = lambda x: x
78 self.encodefn = lambda x: x
79 self.decodefn = lambda x: x
79 self.decodefn = lambda x: x
80 self.spath = self.path
80 self.spath = self.path
81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
82
82
83 self.ui = ui.ui(parentui=parentui)
83 self.ui = ui.ui(parentui=parentui)
84 try:
84 try:
85 self.ui.readconfig(self.join("hgrc"), self.root)
85 self.ui.readconfig(self.join("hgrc"), self.root)
86 except IOError:
86 except IOError:
87 pass
87 pass
88
88
89 v = self.ui.configrevlog()
89 v = self.ui.configrevlog()
90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
92 fl = v.get('flags', None)
92 fl = v.get('flags', None)
93 flags = 0
93 flags = 0
94 if fl != None:
94 if fl != None:
95 for x in fl.split():
95 for x in fl.split():
96 flags |= revlog.flagstr(x)
96 flags |= revlog.flagstr(x)
97 elif self.revlogv1:
97 elif self.revlogv1:
98 flags = revlog.REVLOG_DEFAULT_FLAGS
98 flags = revlog.REVLOG_DEFAULT_FLAGS
99
99
100 v = self.revlogversion | flags
100 v = self.revlogversion | flags
101 self.manifest = manifest.manifest(self.sopener, v)
101 self.manifest = manifest.manifest(self.sopener, v)
102 self.changelog = changelog.changelog(self.sopener, v)
102 self.changelog = changelog.changelog(self.sopener, v)
103
103
104 fallback = self.ui.config('ui', 'fallbackencoding')
104 fallback = self.ui.config('ui', 'fallbackencoding')
105 if fallback:
105 if fallback:
106 util._fallbackencoding = fallback
106 util._fallbackencoding = fallback
107
107
108 # the changelog might not have the inline index flag
108 # the changelog might not have the inline index flag
109 # on. If the format of the changelog is the same as found in
109 # on. If the format of the changelog is the same as found in
110 # .hgrc, apply any flags found in the .hgrc as well.
110 # .hgrc, apply any flags found in the .hgrc as well.
111 # Otherwise, just version from the changelog
111 # Otherwise, just version from the changelog
112 v = self.changelog.version
112 v = self.changelog.version
113 if v == self.revlogversion:
113 if v == self.revlogversion:
114 v |= flags
114 v |= flags
115 self.revlogversion = v
115 self.revlogversion = v
116
116
117 self.tagscache = None
117 self.tagscache = None
118 self.branchcache = None
118 self.branchcache = None
119 self.nodetagscache = None
119 self.nodetagscache = None
120 self.filterpats = {}
120 self.filterpats = {}
121 self.transhandle = None
121 self.transhandle = None
122
122
123 self._link = lambda x: False
123 self._link = lambda x: False
124 if util.checklink(self.root):
124 if util.checklink(self.root):
125 r = self.root # avoid circular reference in lambda
125 r = self.root # avoid circular reference in lambda
126 self._link = lambda x: util.is_link(os.path.join(r, x))
126 self._link = lambda x: util.is_link(os.path.join(r, x))
127
127
128 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
128 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129
129
130 def url(self):
130 def url(self):
131 return 'file:' + self.root
131 return 'file:' + self.root
132
132
133 def hook(self, name, throw=False, **args):
133 def hook(self, name, throw=False, **args):
134 def callhook(hname, funcname):
134 def callhook(hname, funcname):
135 '''call python hook. hook is callable object, looked up as
135 '''call python hook. hook is callable object, looked up as
136 name in python module. if callable returns "true", hook
136 name in python module. if callable returns "true", hook
137 fails, else passes. if hook raises exception, treated as
137 fails, else passes. if hook raises exception, treated as
138 hook failure. exception propagates if throw is "true".
138 hook failure. exception propagates if throw is "true".
139
139
140 reason for "true" meaning "hook failed" is so that
140 reason for "true" meaning "hook failed" is so that
141 unmodified commands (e.g. mercurial.commands.update) can
141 unmodified commands (e.g. mercurial.commands.update) can
142 be run as hooks without wrappers to convert return values.'''
142 be run as hooks without wrappers to convert return values.'''
143
143
144 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
144 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
145 obj = funcname
145 obj = funcname
146 if not callable(obj):
146 if not callable(obj):
147 d = funcname.rfind('.')
147 d = funcname.rfind('.')
148 if d == -1:
148 if d == -1:
149 raise util.Abort(_('%s hook is invalid ("%s" not in '
149 raise util.Abort(_('%s hook is invalid ("%s" not in '
150 'a module)') % (hname, funcname))
150 'a module)') % (hname, funcname))
151 modname = funcname[:d]
151 modname = funcname[:d]
152 try:
152 try:
153 obj = __import__(modname)
153 obj = __import__(modname)
154 except ImportError:
154 except ImportError:
155 try:
155 try:
156 # extensions are loaded with hgext_ prefix
156 # extensions are loaded with hgext_ prefix
157 obj = __import__("hgext_%s" % modname)
157 obj = __import__("hgext_%s" % modname)
158 except ImportError:
158 except ImportError:
159 raise util.Abort(_('%s hook is invalid '
159 raise util.Abort(_('%s hook is invalid '
160 '(import of "%s" failed)') %
160 '(import of "%s" failed)') %
161 (hname, modname))
161 (hname, modname))
162 try:
162 try:
163 for p in funcname.split('.')[1:]:
163 for p in funcname.split('.')[1:]:
164 obj = getattr(obj, p)
164 obj = getattr(obj, p)
165 except AttributeError, err:
165 except AttributeError, err:
166 raise util.Abort(_('%s hook is invalid '
166 raise util.Abort(_('%s hook is invalid '
167 '("%s" is not defined)') %
167 '("%s" is not defined)') %
168 (hname, funcname))
168 (hname, funcname))
169 if not callable(obj):
169 if not callable(obj):
170 raise util.Abort(_('%s hook is invalid '
170 raise util.Abort(_('%s hook is invalid '
171 '("%s" is not callable)') %
171 '("%s" is not callable)') %
172 (hname, funcname))
172 (hname, funcname))
173 try:
173 try:
174 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
174 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
175 except (KeyboardInterrupt, util.SignalInterrupt):
175 except (KeyboardInterrupt, util.SignalInterrupt):
176 raise
176 raise
177 except Exception, exc:
177 except Exception, exc:
178 if isinstance(exc, util.Abort):
178 if isinstance(exc, util.Abort):
179 self.ui.warn(_('error: %s hook failed: %s\n') %
179 self.ui.warn(_('error: %s hook failed: %s\n') %
180 (hname, exc.args[0]))
180 (hname, exc.args[0]))
181 else:
181 else:
182 self.ui.warn(_('error: %s hook raised an exception: '
182 self.ui.warn(_('error: %s hook raised an exception: '
183 '%s\n') % (hname, exc))
183 '%s\n') % (hname, exc))
184 if throw:
184 if throw:
185 raise
185 raise
186 self.ui.print_exc()
186 self.ui.print_exc()
187 return True
187 return True
188 if r:
188 if r:
189 if throw:
189 if throw:
190 raise util.Abort(_('%s hook failed') % hname)
190 raise util.Abort(_('%s hook failed') % hname)
191 self.ui.warn(_('warning: %s hook failed\n') % hname)
191 self.ui.warn(_('warning: %s hook failed\n') % hname)
192 return r
192 return r
193
193
194 def runhook(name, cmd):
194 def runhook(name, cmd):
195 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
195 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
196 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
196 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
197 r = util.system(cmd, environ=env, cwd=self.root)
197 r = util.system(cmd, environ=env, cwd=self.root)
198 if r:
198 if r:
199 desc, r = util.explain_exit(r)
199 desc, r = util.explain_exit(r)
200 if throw:
200 if throw:
201 raise util.Abort(_('%s hook %s') % (name, desc))
201 raise util.Abort(_('%s hook %s') % (name, desc))
202 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
202 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
203 return r
203 return r
204
204
205 r = False
205 r = False
206 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
206 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
207 if hname.split(".", 1)[0] == name and cmd]
207 if hname.split(".", 1)[0] == name and cmd]
208 hooks.sort()
208 hooks.sort()
209 for hname, cmd in hooks:
209 for hname, cmd in hooks:
210 if callable(cmd):
210 if callable(cmd):
211 r = callhook(hname, cmd) or r
211 r = callhook(hname, cmd) or r
212 elif cmd.startswith('python:'):
212 elif cmd.startswith('python:'):
213 r = callhook(hname, cmd[7:].strip()) or r
213 r = callhook(hname, cmd[7:].strip()) or r
214 else:
214 else:
215 r = runhook(hname, cmd) or r
215 r = runhook(hname, cmd) or r
216 return r
216 return r
217
217
218 tag_disallowed = ':\r\n'
218 tag_disallowed = ':\r\n'
219
219
220 def _tag(self, name, node, message, local, user, date, parent=None):
220 def _tag(self, name, node, message, local, user, date, parent=None):
221 use_dirstate = parent is None
221 use_dirstate = parent is None
222
222
223 for c in self.tag_disallowed:
223 for c in self.tag_disallowed:
224 if c in name:
224 if c in name:
225 raise util.Abort(_('%r cannot be used in a tag name') % c)
225 raise util.Abort(_('%r cannot be used in a tag name') % c)
226
226
227 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
227 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
228
228
229 if local:
229 if local:
230 # local tags are stored in the current charset
230 # local tags are stored in the current charset
231 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
231 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
232 self.hook('tag', node=hex(node), tag=name, local=local)
232 self.hook('tag', node=hex(node), tag=name, local=local)
233 return
233 return
234
234
235 # committed tags are stored in UTF-8
235 # committed tags are stored in UTF-8
236 line = '%s %s\n' % (hex(node), util.fromlocal(name))
236 line = '%s %s\n' % (hex(node), util.fromlocal(name))
237 if use_dirstate:
237 if use_dirstate:
238 self.wfile('.hgtags', 'ab').write(line)
238 self.wfile('.hgtags', 'ab').write(line)
239 else:
239 else:
240 ntags = self.filectx('.hgtags', parent).data()
240 ntags = self.filectx('.hgtags', parent).data()
241 self.wfile('.hgtags', 'ab').write(ntags + line)
241 self.wfile('.hgtags', 'ab').write(ntags + line)
242 if use_dirstate and self.dirstate.state('.hgtags') == '?':
242 if use_dirstate and self.dirstate.state('.hgtags') == '?':
243 self.add(['.hgtags'])
243 self.add(['.hgtags'])
244
244
245 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
245 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
246
246
247 self.hook('tag', node=hex(node), tag=name, local=local)
247 self.hook('tag', node=hex(node), tag=name, local=local)
248
248
249 return tagnode
249 return tagnode
250
250
251 def tag(self, name, node, message, local, user, date):
251 def tag(self, name, node, message, local, user, date):
252 '''tag a revision with a symbolic name.
252 '''tag a revision with a symbolic name.
253
253
254 if local is True, the tag is stored in a per-repository file.
254 if local is True, the tag is stored in a per-repository file.
255 otherwise, it is stored in the .hgtags file, and a new
255 otherwise, it is stored in the .hgtags file, and a new
256 changeset is committed with the change.
256 changeset is committed with the change.
257
257
258 keyword arguments:
258 keyword arguments:
259
259
260 local: whether to store tag in non-version-controlled file
260 local: whether to store tag in non-version-controlled file
261 (default False)
261 (default False)
262
262
263 message: commit message to use if committing
263 message: commit message to use if committing
264
264
265 user: name of user to use if committing
265 user: name of user to use if committing
266
266
267 date: date tuple to use if committing'''
267 date: date tuple to use if committing'''
268
268
269 for x in self.status()[:5]:
269 for x in self.status()[:5]:
270 if '.hgtags' in x:
270 if '.hgtags' in x:
271 raise util.Abort(_('working copy of .hgtags is changed '
271 raise util.Abort(_('working copy of .hgtags is changed '
272 '(please commit .hgtags manually)'))
272 '(please commit .hgtags manually)'))
273
273
274
274
275 self._tag(name, node, message, local, user, date)
275 self._tag(name, node, message, local, user, date)
276
276
277 def tags(self):
277 def tags(self):
278 '''return a mapping of tag to node'''
278 '''return a mapping of tag to node'''
279 if not self.tagscache:
279 if not self.tagscache:
280 self.tagscache = {}
280 self.tagscache = {}
281
281
282 def parsetag(line, context):
282 def parsetag(line, context):
283 if not line:
283 if not line:
284 return
284 return
285 s = l.split(" ", 1)
285 s = l.split(" ", 1)
286 if len(s) != 2:
286 if len(s) != 2:
287 self.ui.warn(_("%s: cannot parse entry\n") % context)
287 self.ui.warn(_("%s: cannot parse entry\n") % context)
288 return
288 return
289 node, key = s
289 node, key = s
290 key = util.tolocal(key.strip()) # stored in UTF-8
290 key = util.tolocal(key.strip()) # stored in UTF-8
291 try:
291 try:
292 bin_n = bin(node)
292 bin_n = bin(node)
293 except TypeError:
293 except TypeError:
294 self.ui.warn(_("%s: node '%s' is not well formed\n") %
294 self.ui.warn(_("%s: node '%s' is not well formed\n") %
295 (context, node))
295 (context, node))
296 return
296 return
297 if bin_n not in self.changelog.nodemap:
297 if bin_n not in self.changelog.nodemap:
298 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
298 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
299 (context, key))
299 (context, key))
300 return
300 return
301 self.tagscache[key] = bin_n
301 self.tagscache[key] = bin_n
302
302
303 # read the tags file from each head, ending with the tip,
303 # read the tags file from each head, ending with the tip,
304 # and add each tag found to the map, with "newer" ones
304 # and add each tag found to the map, with "newer" ones
305 # taking precedence
305 # taking precedence
306 f = None
306 f = None
307 for rev, node, fnode in self._hgtagsnodes():
307 for rev, node, fnode in self._hgtagsnodes():
308 f = (f and f.filectx(fnode) or
308 f = (f and f.filectx(fnode) or
309 self.filectx('.hgtags', fileid=fnode))
309 self.filectx('.hgtags', fileid=fnode))
310 count = 0
310 count = 0
311 for l in f.data().splitlines():
311 for l in f.data().splitlines():
312 count += 1
312 count += 1
313 parsetag(l, _("%s, line %d") % (str(f), count))
313 parsetag(l, _("%s, line %d") % (str(f), count))
314
314
315 try:
315 try:
316 f = self.opener("localtags")
316 f = self.opener("localtags")
317 count = 0
317 count = 0
318 for l in f:
318 for l in f:
319 # localtags are stored in the local character set
319 # localtags are stored in the local character set
320 # while the internal tag table is stored in UTF-8
320 # while the internal tag table is stored in UTF-8
321 l = util.fromlocal(l)
321 l = util.fromlocal(l)
322 count += 1
322 count += 1
323 parsetag(l, _("localtags, line %d") % count)
323 parsetag(l, _("localtags, line %d") % count)
324 except IOError:
324 except IOError:
325 pass
325 pass
326
326
327 self.tagscache['tip'] = self.changelog.tip()
327 self.tagscache['tip'] = self.changelog.tip()
328
328
329 return self.tagscache
329 return self.tagscache
330
330
331 def _hgtagsnodes(self):
331 def _hgtagsnodes(self):
332 heads = self.heads()
332 heads = self.heads()
333 heads.reverse()
333 heads.reverse()
334 last = {}
334 last = {}
335 ret = []
335 ret = []
336 for node in heads:
336 for node in heads:
337 c = self.changectx(node)
337 c = self.changectx(node)
338 rev = c.rev()
338 rev = c.rev()
339 try:
339 try:
340 fnode = c.filenode('.hgtags')
340 fnode = c.filenode('.hgtags')
341 except revlog.LookupError:
341 except revlog.LookupError:
342 continue
342 continue
343 ret.append((rev, node, fnode))
343 ret.append((rev, node, fnode))
344 if fnode in last:
344 if fnode in last:
345 ret[last[fnode]] = None
345 ret[last[fnode]] = None
346 last[fnode] = len(ret) - 1
346 last[fnode] = len(ret) - 1
347 return [item for item in ret if item]
347 return [item for item in ret if item]
348
348
349 def tagslist(self):
349 def tagslist(self):
350 '''return a list of tags ordered by revision'''
350 '''return a list of tags ordered by revision'''
351 l = []
351 l = []
352 for t, n in self.tags().items():
352 for t, n in self.tags().items():
353 try:
353 try:
354 r = self.changelog.rev(n)
354 r = self.changelog.rev(n)
355 except:
355 except:
356 r = -2 # sort to the beginning of the list if unknown
356 r = -2 # sort to the beginning of the list if unknown
357 l.append((r, t, n))
357 l.append((r, t, n))
358 l.sort()
358 l.sort()
359 return [(t, n) for r, t, n in l]
359 return [(t, n) for r, t, n in l]
360
360
361 def nodetags(self, node):
361 def nodetags(self, node):
362 '''return the tags associated with a node'''
362 '''return the tags associated with a node'''
363 if not self.nodetagscache:
363 if not self.nodetagscache:
364 self.nodetagscache = {}
364 self.nodetagscache = {}
365 for t, n in self.tags().items():
365 for t, n in self.tags().items():
366 self.nodetagscache.setdefault(n, []).append(t)
366 self.nodetagscache.setdefault(n, []).append(t)
367 return self.nodetagscache.get(node, [])
367 return self.nodetagscache.get(node, [])
368
368
369 def _branchtags(self):
369 def _branchtags(self):
370 partial, last, lrev = self._readbranchcache()
370 partial, last, lrev = self._readbranchcache()
371
371
372 tiprev = self.changelog.count() - 1
372 tiprev = self.changelog.count() - 1
373 if lrev != tiprev:
373 if lrev != tiprev:
374 self._updatebranchcache(partial, lrev+1, tiprev+1)
374 self._updatebranchcache(partial, lrev+1, tiprev+1)
375 self._writebranchcache(partial, self.changelog.tip(), tiprev)
375 self._writebranchcache(partial, self.changelog.tip(), tiprev)
376
376
377 return partial
377 return partial
378
378
379 def branchtags(self):
379 def branchtags(self):
380 if self.branchcache is not None:
380 if self.branchcache is not None:
381 return self.branchcache
381 return self.branchcache
382
382
383 self.branchcache = {} # avoid recursion in changectx
383 self.branchcache = {} # avoid recursion in changectx
384 partial = self._branchtags()
384 partial = self._branchtags()
385
385
386 # the branch cache is stored on disk as UTF-8, but in the local
386 # the branch cache is stored on disk as UTF-8, but in the local
387 # charset internally
387 # charset internally
388 for k, v in partial.items():
388 for k, v in partial.items():
389 self.branchcache[util.tolocal(k)] = v
389 self.branchcache[util.tolocal(k)] = v
390 return self.branchcache
390 return self.branchcache
391
391
392 def _readbranchcache(self):
392 def _readbranchcache(self):
393 partial = {}
393 partial = {}
394 try:
394 try:
395 f = self.opener("branches.cache")
395 f = self.opener("branches.cache")
396 lines = f.read().split('\n')
396 lines = f.read().split('\n')
397 f.close()
397 f.close()
398 last, lrev = lines.pop(0).rstrip().split(" ", 1)
398 last, lrev = lines.pop(0).rstrip().split(" ", 1)
399 last, lrev = bin(last), int(lrev)
399 last, lrev = bin(last), int(lrev)
400 if not (lrev < self.changelog.count() and
400 if not (lrev < self.changelog.count() and
401 self.changelog.node(lrev) == last): # sanity check
401 self.changelog.node(lrev) == last): # sanity check
402 # invalidate the cache
402 # invalidate the cache
403 raise ValueError('Invalid branch cache: unknown tip')
403 raise ValueError('Invalid branch cache: unknown tip')
404 for l in lines:
404 for l in lines:
405 if not l: continue
405 if not l: continue
406 node, label = l.rstrip().split(" ", 1)
406 node, label = l.rstrip().split(" ", 1)
407 partial[label] = bin(node)
407 partial[label] = bin(node)
408 except (KeyboardInterrupt, util.SignalInterrupt):
408 except (KeyboardInterrupt, util.SignalInterrupt):
409 raise
409 raise
410 except Exception, inst:
410 except Exception, inst:
411 if self.ui.debugflag:
411 if self.ui.debugflag:
412 self.ui.warn(str(inst), '\n')
412 self.ui.warn(str(inst), '\n')
413 partial, last, lrev = {}, nullid, nullrev
413 partial, last, lrev = {}, nullid, nullrev
414 return partial, last, lrev
414 return partial, last, lrev
415
415
416 def _writebranchcache(self, branches, tip, tiprev):
416 def _writebranchcache(self, branches, tip, tiprev):
417 try:
417 try:
418 f = self.opener("branches.cache", "w")
418 f = self.opener("branches.cache", "w")
419 f.write("%s %s\n" % (hex(tip), tiprev))
419 f.write("%s %s\n" % (hex(tip), tiprev))
420 for label, node in branches.iteritems():
420 for label, node in branches.iteritems():
421 f.write("%s %s\n" % (hex(node), label))
421 f.write("%s %s\n" % (hex(node), label))
422 except IOError:
422 except IOError:
423 pass
423 pass
424
424
425 def _updatebranchcache(self, partial, start, end):
425 def _updatebranchcache(self, partial, start, end):
426 for r in xrange(start, end):
426 for r in xrange(start, end):
427 c = self.changectx(r)
427 c = self.changectx(r)
428 b = c.branch()
428 b = c.branch()
429 if b:
429 if b:
430 partial[b] = c.node()
430 partial[b] = c.node()
431
431
432 def lookup(self, key):
432 def lookup(self, key):
433 if key == '.':
433 if key == '.':
434 key = self.dirstate.parents()[0]
434 key = self.dirstate.parents()[0]
435 if key == nullid:
435 if key == nullid:
436 raise repo.RepoError(_("no revision checked out"))
436 raise repo.RepoError(_("no revision checked out"))
437 elif key == 'null':
437 elif key == 'null':
438 return nullid
438 return nullid
439 n = self.changelog._match(key)
439 n = self.changelog._match(key)
440 if n:
440 if n:
441 return n
441 return n
442 if key in self.tags():
442 if key in self.tags():
443 return self.tags()[key]
443 return self.tags()[key]
444 if key in self.branchtags():
444 if key in self.branchtags():
445 return self.branchtags()[key]
445 return self.branchtags()[key]
446 n = self.changelog._partialmatch(key)
446 n = self.changelog._partialmatch(key)
447 if n:
447 if n:
448 return n
448 return n
449 raise repo.RepoError(_("unknown revision '%s'") % key)
449 raise repo.RepoError(_("unknown revision '%s'") % key)
450
450
451 def dev(self):
451 def dev(self):
452 return os.lstat(self.path).st_dev
452 return os.lstat(self.path).st_dev
453
453
454 def local(self):
454 def local(self):
455 return True
455 return True
456
456
457 def join(self, f):
457 def join(self, f):
458 return os.path.join(self.path, f)
458 return os.path.join(self.path, f)
459
459
460 def sjoin(self, f):
460 def sjoin(self, f):
461 f = self.encodefn(f)
461 f = self.encodefn(f)
462 return os.path.join(self.spath, f)
462 return os.path.join(self.spath, f)
463
463
464 def wjoin(self, f):
464 def wjoin(self, f):
465 return os.path.join(self.root, f)
465 return os.path.join(self.root, f)
466
466
467 def file(self, f):
467 def file(self, f):
468 if f[0] == '/':
468 if f[0] == '/':
469 f = f[1:]
469 f = f[1:]
470 return filelog.filelog(self.sopener, f, self.revlogversion)
470 return filelog.filelog(self.sopener, f, self.revlogversion)
471
471
472 def changectx(self, changeid=None):
472 def changectx(self, changeid=None):
473 return context.changectx(self, changeid)
473 return context.changectx(self, changeid)
474
474
475 def workingctx(self):
475 def workingctx(self):
476 return context.workingctx(self)
476 return context.workingctx(self)
477
477
478 def parents(self, changeid=None):
478 def parents(self, changeid=None):
479 '''
479 '''
480 get list of changectxs for parents of changeid or working directory
480 get list of changectxs for parents of changeid or working directory
481 '''
481 '''
482 if changeid is None:
482 if changeid is None:
483 pl = self.dirstate.parents()
483 pl = self.dirstate.parents()
484 else:
484 else:
485 n = self.changelog.lookup(changeid)
485 n = self.changelog.lookup(changeid)
486 pl = self.changelog.parents(n)
486 pl = self.changelog.parents(n)
487 if pl[1] == nullid:
487 if pl[1] == nullid:
488 return [self.changectx(pl[0])]
488 return [self.changectx(pl[0])]
489 return [self.changectx(pl[0]), self.changectx(pl[1])]
489 return [self.changectx(pl[0]), self.changectx(pl[1])]
490
490
491 def filectx(self, path, changeid=None, fileid=None):
491 def filectx(self, path, changeid=None, fileid=None):
492 """changeid can be a changeset revision, node, or tag.
492 """changeid can be a changeset revision, node, or tag.
493 fileid can be a file revision or node."""
493 fileid can be a file revision or node."""
494 return context.filectx(self, path, changeid, fileid)
494 return context.filectx(self, path, changeid, fileid)
495
495
496 def getcwd(self):
496 def getcwd(self):
497 return self.dirstate.getcwd()
497 return self.dirstate.getcwd()
498
498
499 def wfile(self, f, mode='r'):
499 def wfile(self, f, mode='r'):
500 return self.wopener(f, mode)
500 return self.wopener(f, mode)
501
501
502 def _filter(self, filter, filename, data):
502 def _filter(self, filter, filename, data):
503 if filter not in self.filterpats:
503 if filter not in self.filterpats:
504 l = []
504 l = []
505 for pat, cmd in self.ui.configitems(filter):
505 for pat, cmd in self.ui.configitems(filter):
506 mf = util.matcher(self.root, "", [pat], [], [])[1]
506 mf = util.matcher(self.root, "", [pat], [], [])[1]
507 l.append((mf, cmd))
507 l.append((mf, cmd))
508 self.filterpats[filter] = l
508 self.filterpats[filter] = l
509
509
510 for mf, cmd in self.filterpats[filter]:
510 for mf, cmd in self.filterpats[filter]:
511 if mf(filename):
511 if mf(filename):
512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
513 data = util.filter(data, cmd)
513 data = util.filter(data, cmd)
514 break
514 break
515
515
516 return data
516 return data
517
517
518 def wread(self, filename):
518 def wread(self, filename):
519 if self._link(filename):
519 if self._link(filename):
520 data = os.readlink(self.wjoin(filename))
520 data = os.readlink(self.wjoin(filename))
521 else:
521 else:
522 data = self.wopener(filename, 'r').read()
522 data = self.wopener(filename, 'r').read()
523 return self._filter("encode", filename, data)
523 return self._filter("encode", filename, data)
524
524
525 def wwrite(self, filename, data, flags):
525 def wwrite(self, filename, data, flags):
526 data = self._filter("decode", filename, data)
526 data = self._filter("decode", filename, data)
527 if "l" in flags:
527 if "l" in flags:
528 f = self.wjoin(filename)
528 f = self.wjoin(filename)
529 try:
529 try:
530 os.unlink(f)
530 os.unlink(f)
531 except OSError:
531 except OSError:
532 pass
532 pass
533 d = os.path.dirname(f)
533 d = os.path.dirname(f)
534 if not os.path.exists(d):
534 if not os.path.exists(d):
535 os.makedirs(d)
535 os.makedirs(d)
536 os.symlink(data, f)
536 os.symlink(data, f)
537 else:
537 else:
538 try:
538 try:
539 if self._link(filename):
539 if self._link(filename):
540 os.unlink(self.wjoin(filename))
540 os.unlink(self.wjoin(filename))
541 except OSError:
541 except OSError:
542 pass
542 pass
543 self.wopener(filename, 'w').write(data)
543 self.wopener(filename, 'w').write(data)
544 util.set_exec(self.wjoin(filename), "x" in flags)
544 util.set_exec(self.wjoin(filename), "x" in flags)
545
545
546 def wwritedata(self, filename, data):
546 def wwritedata(self, filename, data):
547 return self._filter("decode", filename, data)
547 return self._filter("decode", filename, data)
548
548
549 def transaction(self):
549 def transaction(self):
550 tr = self.transhandle
550 tr = self.transhandle
551 if tr != None and tr.running():
551 if tr != None and tr.running():
552 return tr.nest()
552 return tr.nest()
553
553
554 # save dirstate for rollback
554 # save dirstate for rollback
555 try:
555 try:
556 ds = self.opener("dirstate").read()
556 ds = self.opener("dirstate").read()
557 except IOError:
557 except IOError:
558 ds = ""
558 ds = ""
559 self.opener("journal.dirstate", "w").write(ds)
559 self.opener("journal.dirstate", "w").write(ds)
560
560
561 renames = [(self.sjoin("journal"), self.sjoin("undo")),
561 renames = [(self.sjoin("journal"), self.sjoin("undo")),
562 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
562 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
563 tr = transaction.transaction(self.ui.warn, self.sopener,
563 tr = transaction.transaction(self.ui.warn, self.sopener,
564 self.sjoin("journal"),
564 self.sjoin("journal"),
565 aftertrans(renames))
565 aftertrans(renames))
566 self.transhandle = tr
566 self.transhandle = tr
567 return tr
567 return tr
568
568
569 def recover(self):
569 def recover(self):
570 l = self.lock()
570 l = self.lock()
571 if os.path.exists(self.sjoin("journal")):
571 if os.path.exists(self.sjoin("journal")):
572 self.ui.status(_("rolling back interrupted transaction\n"))
572 self.ui.status(_("rolling back interrupted transaction\n"))
573 transaction.rollback(self.sopener, self.sjoin("journal"))
573 transaction.rollback(self.sopener, self.sjoin("journal"))
574 self.reload()
574 self.reload()
575 return True
575 return True
576 else:
576 else:
577 self.ui.warn(_("no interrupted transaction available\n"))
577 self.ui.warn(_("no interrupted transaction available\n"))
578 return False
578 return False
579
579
580 def rollback(self, wlock=None):
580 def rollback(self, wlock=None):
581 if not wlock:
581 if not wlock:
582 wlock = self.wlock()
582 wlock = self.wlock()
583 l = self.lock()
583 l = self.lock()
584 if os.path.exists(self.sjoin("undo")):
584 if os.path.exists(self.sjoin("undo")):
585 self.ui.status(_("rolling back last transaction\n"))
585 self.ui.status(_("rolling back last transaction\n"))
586 transaction.rollback(self.sopener, self.sjoin("undo"))
586 transaction.rollback(self.sopener, self.sjoin("undo"))
587 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
587 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
588 self.reload()
588 self.reload()
589 self.wreload()
589 self.wreload()
590 else:
590 else:
591 self.ui.warn(_("no rollback information available\n"))
591 self.ui.warn(_("no rollback information available\n"))
592
592
593 def wreload(self):
593 def wreload(self):
594 self.dirstate.read()
594 self.dirstate.read()
595
595
596 def reload(self):
596 def reload(self):
597 self.changelog.load()
597 self.changelog.load()
598 self.manifest.load()
598 self.manifest.load()
599 self.tagscache = None
599 self.tagscache = None
600 self.nodetagscache = None
600 self.nodetagscache = None
601
601
602 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
602 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
603 desc=None):
603 desc=None):
604 try:
604 try:
605 l = lock.lock(lockname, 0, releasefn, desc=desc)
605 l = lock.lock(lockname, 0, releasefn, desc=desc)
606 except lock.LockHeld, inst:
606 except lock.LockHeld, inst:
607 if not wait:
607 if not wait:
608 raise
608 raise
609 self.ui.warn(_("waiting for lock on %s held by %r\n") %
609 self.ui.warn(_("waiting for lock on %s held by %r\n") %
610 (desc, inst.locker))
610 (desc, inst.locker))
611 # default to 600 seconds timeout
611 # default to 600 seconds timeout
612 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
612 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
613 releasefn, desc=desc)
613 releasefn, desc=desc)
614 if acquirefn:
614 if acquirefn:
615 acquirefn()
615 acquirefn()
616 return l
616 return l
617
617
618 def lock(self, wait=1):
618 def lock(self, wait=1):
619 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
619 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
620 desc=_('repository %s') % self.origroot)
620 desc=_('repository %s') % self.origroot)
621
621
622 def wlock(self, wait=1):
622 def wlock(self, wait=1):
623 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
623 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
624 self.wreload,
624 self.wreload,
625 desc=_('working directory of %s') % self.origroot)
625 desc=_('working directory of %s') % self.origroot)
626
626
627 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
627 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
628 """
628 """
629 commit an individual file as part of a larger transaction
629 commit an individual file as part of a larger transaction
630 """
630 """
631
631
632 t = self.wread(fn)
632 t = self.wread(fn)
633 fl = self.file(fn)
633 fl = self.file(fn)
634 fp1 = manifest1.get(fn, nullid)
634 fp1 = manifest1.get(fn, nullid)
635 fp2 = manifest2.get(fn, nullid)
635 fp2 = manifest2.get(fn, nullid)
636
636
637 meta = {}
637 meta = {}
638 cp = self.dirstate.copied(fn)
638 cp = self.dirstate.copied(fn)
639 if cp:
639 if cp:
640 # Mark the new revision of this file as a copy of another
640 # Mark the new revision of this file as a copy of another
641 # file. This copy data will effectively act as a parent
641 # file. This copy data will effectively act as a parent
642 # of this new revision. If this is a merge, the first
642 # of this new revision. If this is a merge, the first
643 # parent will be the nullid (meaning "look up the copy data")
643 # parent will be the nullid (meaning "look up the copy data")
644 # and the second one will be the other parent. For example:
644 # and the second one will be the other parent. For example:
645 #
645 #
646 # 0 --- 1 --- 3 rev1 changes file foo
646 # 0 --- 1 --- 3 rev1 changes file foo
647 # \ / rev2 renames foo to bar and changes it
647 # \ / rev2 renames foo to bar and changes it
648 # \- 2 -/ rev3 should have bar with all changes and
648 # \- 2 -/ rev3 should have bar with all changes and
649 # should record that bar descends from
649 # should record that bar descends from
650 # bar in rev2 and foo in rev1
650 # bar in rev2 and foo in rev1
651 #
651 #
652 # this allows this merge to succeed:
652 # this allows this merge to succeed:
653 #
653 #
654 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
654 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
655 # \ / merging rev3 and rev4 should use bar@rev2
655 # \ / merging rev3 and rev4 should use bar@rev2
656 # \- 2 --- 4 as the merge base
656 # \- 2 --- 4 as the merge base
657 #
657 #
658 meta["copy"] = cp
658 meta["copy"] = cp
659 if not manifest2: # not a branch merge
659 if not manifest2: # not a branch merge
660 meta["copyrev"] = hex(manifest1.get(cp, nullid))
660 meta["copyrev"] = hex(manifest1.get(cp, nullid))
661 fp2 = nullid
661 fp2 = nullid
662 elif fp2 != nullid: # copied on remote side
662 elif fp2 != nullid: # copied on remote side
663 meta["copyrev"] = hex(manifest1.get(cp, nullid))
663 meta["copyrev"] = hex(manifest1.get(cp, nullid))
664 elif fp1 != nullid: # copied on local side, reversed
664 elif fp1 != nullid: # copied on local side, reversed
665 meta["copyrev"] = hex(manifest2.get(cp))
665 meta["copyrev"] = hex(manifest2.get(cp))
666 fp2 = fp1
666 fp2 = fp1
667 else: # directory rename
667 else: # directory rename
668 meta["copyrev"] = hex(manifest1.get(cp, nullid))
668 meta["copyrev"] = hex(manifest1.get(cp, nullid))
669 self.ui.debug(_(" %s: copy %s:%s\n") %
669 self.ui.debug(_(" %s: copy %s:%s\n") %
670 (fn, cp, meta["copyrev"]))
670 (fn, cp, meta["copyrev"]))
671 fp1 = nullid
671 fp1 = nullid
672 elif fp2 != nullid:
672 elif fp2 != nullid:
673 # is one parent an ancestor of the other?
673 # is one parent an ancestor of the other?
674 fpa = fl.ancestor(fp1, fp2)
674 fpa = fl.ancestor(fp1, fp2)
675 if fpa == fp1:
675 if fpa == fp1:
676 fp1, fp2 = fp2, nullid
676 fp1, fp2 = fp2, nullid
677 elif fpa == fp2:
677 elif fpa == fp2:
678 fp2 = nullid
678 fp2 = nullid
679
679
680 # is the file unmodified from the parent? report existing entry
680 # is the file unmodified from the parent? report existing entry
681 if fp2 == nullid and not fl.cmp(fp1, t):
681 if fp2 == nullid and not fl.cmp(fp1, t):
682 return fp1
682 return fp1
683
683
684 changelist.append(fn)
684 changelist.append(fn)
685 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
685 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
686
686
687 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
687 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
688 if p1 is None:
688 if p1 is None:
689 p1, p2 = self.dirstate.parents()
689 p1, p2 = self.dirstate.parents()
690 return self.commit(files=files, text=text, user=user, date=date,
690 return self.commit(files=files, text=text, user=user, date=date,
691 p1=p1, p2=p2, wlock=wlock, extra=extra)
691 p1=p1, p2=p2, wlock=wlock, extra=extra)
692
692
693 def commit(self, files=None, text="", user=None, date=None,
693 def commit(self, files=None, text="", user=None, date=None,
694 match=util.always, force=False, lock=None, wlock=None,
694 match=util.always, force=False, lock=None, wlock=None,
695 force_editor=False, p1=None, p2=None, extra={}):
695 force_editor=False, p1=None, p2=None, extra={}):
696
696
697 commit = []
697 commit = []
698 remove = []
698 remove = []
699 changed = []
699 changed = []
700 use_dirstate = (p1 is None) # not rawcommit
700 use_dirstate = (p1 is None) # not rawcommit
701 extra = extra.copy()
701 extra = extra.copy()
702
702
703 if use_dirstate:
703 if use_dirstate:
704 if files:
704 if files:
705 for f in files:
705 for f in files:
706 s = self.dirstate.state(f)
706 s = self.dirstate.state(f)
707 if s in 'nmai':
707 if s in 'nmai':
708 commit.append(f)
708 commit.append(f)
709 elif s == 'r':
709 elif s == 'r':
710 remove.append(f)
710 remove.append(f)
711 else:
711 else:
712 self.ui.warn(_("%s not tracked!\n") % f)
712 self.ui.warn(_("%s not tracked!\n") % f)
713 else:
713 else:
714 changes = self.status(match=match)[:5]
714 changes = self.status(match=match)[:5]
715 modified, added, removed, deleted, unknown = changes
715 modified, added, removed, deleted, unknown = changes
716 commit = modified + added
716 commit = modified + added
717 remove = removed
717 remove = removed
718 else:
718 else:
719 commit = files
719 commit = files
720
720
721 if use_dirstate:
721 if use_dirstate:
722 p1, p2 = self.dirstate.parents()
722 p1, p2 = self.dirstate.parents()
723 update_dirstate = True
723 update_dirstate = True
724 else:
724 else:
725 p1, p2 = p1, p2 or nullid
725 p1, p2 = p1, p2 or nullid
726 update_dirstate = (self.dirstate.parents()[0] == p1)
726 update_dirstate = (self.dirstate.parents()[0] == p1)
727
727
728 c1 = self.changelog.read(p1)
728 c1 = self.changelog.read(p1)
729 c2 = self.changelog.read(p2)
729 c2 = self.changelog.read(p2)
730 m1 = self.manifest.read(c1[0]).copy()
730 m1 = self.manifest.read(c1[0]).copy()
731 m2 = self.manifest.read(c2[0])
731 m2 = self.manifest.read(c2[0])
732
732
733 if use_dirstate:
733 if use_dirstate:
734 branchname = self.workingctx().branch()
734 branchname = self.workingctx().branch()
735 try:
735 try:
736 branchname = branchname.decode('UTF-8').encode('UTF-8')
736 branchname = branchname.decode('UTF-8').encode('UTF-8')
737 except UnicodeDecodeError:
737 except UnicodeDecodeError:
738 raise util.Abort(_('branch name not in UTF-8!'))
738 raise util.Abort(_('branch name not in UTF-8!'))
739 else:
739 else:
740 branchname = ""
740 branchname = ""
741
741
742 if use_dirstate:
742 if use_dirstate:
743 oldname = c1[5].get("branch", "") # stored in UTF-8
743 oldname = c1[5].get("branch", "") # stored in UTF-8
744 if not commit and not remove and not force and p2 == nullid and \
744 if not commit and not remove and not force and p2 == nullid and \
745 branchname == oldname:
745 branchname == oldname:
746 self.ui.status(_("nothing changed\n"))
746 self.ui.status(_("nothing changed\n"))
747 return None
747 return None
748
748
749 xp1 = hex(p1)
749 xp1 = hex(p1)
750 if p2 == nullid: xp2 = ''
750 if p2 == nullid: xp2 = ''
751 else: xp2 = hex(p2)
751 else: xp2 = hex(p2)
752
752
753 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
753 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
754
754
755 if not wlock:
755 if not wlock:
756 wlock = self.wlock()
756 wlock = self.wlock()
757 if not lock:
757 if not lock:
758 lock = self.lock()
758 lock = self.lock()
759 tr = self.transaction()
759 tr = self.transaction()
760
760
761 # check in files
761 # check in files
762 new = {}
762 new = {}
763 linkrev = self.changelog.count()
763 linkrev = self.changelog.count()
764 commit.sort()
764 commit.sort()
765 is_exec = util.execfunc(self.root, m1.execf)
765 is_exec = util.execfunc(self.root, m1.execf)
766 is_link = util.linkfunc(self.root, m1.linkf)
766 is_link = util.linkfunc(self.root, m1.linkf)
767 for f in commit:
767 for f in commit:
768 self.ui.note(f + "\n")
768 self.ui.note(f + "\n")
769 try:
769 try:
770 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
770 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
771 m1.set(f, is_exec(f), is_link(f))
771 m1.set(f, is_exec(f), is_link(f))
772 except (OSError, IOError):
772 except (OSError, IOError):
773 if use_dirstate:
773 if use_dirstate:
774 self.ui.warn(_("trouble committing %s!\n") % f)
774 self.ui.warn(_("trouble committing %s!\n") % f)
775 raise
775 raise
776 else:
776 else:
777 remove.append(f)
777 remove.append(f)
778
778
779 # update manifest
779 # update manifest
780 m1.update(new)
780 m1.update(new)
781 remove.sort()
781 remove.sort()
782 removed = []
782 removed = []
783
783
784 for f in remove:
784 for f in remove:
785 if f in m1:
785 if f in m1:
786 del m1[f]
786 del m1[f]
787 removed.append(f)
787 removed.append(f)
788 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
788 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
789
789
790 # add changeset
790 # add changeset
791 new = new.keys()
791 new = new.keys()
792 new.sort()
792 new.sort()
793
793
794 user = user or self.ui.username()
794 user = user or self.ui.username()
795 if not text or force_editor:
795 if not text or force_editor:
796 edittext = []
796 edittext = []
797 if text:
797 if text:
798 edittext.append(text)
798 edittext.append(text)
799 edittext.append("")
799 edittext.append("")
800 edittext.append("HG: user: %s" % user)
800 edittext.append("HG: user: %s" % user)
801 if p2 != nullid:
801 if p2 != nullid:
802 edittext.append("HG: branch merge")
802 edittext.append("HG: branch merge")
803 if branchname:
803 if branchname:
804 edittext.append("HG: branch %s" % util.tolocal(branchname))
804 edittext.append("HG: branch %s" % util.tolocal(branchname))
805 edittext.extend(["HG: changed %s" % f for f in changed])
805 edittext.extend(["HG: changed %s" % f for f in changed])
806 edittext.extend(["HG: removed %s" % f for f in removed])
806 edittext.extend(["HG: removed %s" % f for f in removed])
807 if not changed and not remove:
807 if not changed and not remove:
808 edittext.append("HG: no files changed")
808 edittext.append("HG: no files changed")
809 edittext.append("")
809 edittext.append("")
810 # run editor in the repository root
810 # run editor in the repository root
811 olddir = os.getcwd()
811 olddir = os.getcwd()
812 os.chdir(self.root)
812 os.chdir(self.root)
813 text = self.ui.edit("\n".join(edittext), user)
813 text = self.ui.edit("\n".join(edittext), user)
814 os.chdir(olddir)
814 os.chdir(olddir)
815
815
816 lines = [line.rstrip() for line in text.rstrip().splitlines()]
816 lines = [line.rstrip() for line in text.rstrip().splitlines()]
817 while lines and not lines[0]:
817 while lines and not lines[0]:
818 del lines[0]
818 del lines[0]
819 if not lines:
819 if not lines:
820 return None
820 return None
821 text = '\n'.join(lines)
821 text = '\n'.join(lines)
822 if branchname:
822 if branchname:
823 extra["branch"] = branchname
823 extra["branch"] = branchname
824 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
824 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
825 user, date, extra)
825 user, date, extra)
826 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
826 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
827 parent2=xp2)
827 parent2=xp2)
828 tr.close()
828 tr.close()
829
829
830 if self.branchcache and "branch" in extra:
830 if self.branchcache and "branch" in extra:
831 self.branchcache[util.tolocal(extra["branch"])] = n
831 self.branchcache[util.tolocal(extra["branch"])] = n
832
832
833 if use_dirstate or update_dirstate:
833 if use_dirstate or update_dirstate:
834 self.dirstate.setparents(n)
834 self.dirstate.setparents(n)
835 if use_dirstate:
835 if use_dirstate:
836 self.dirstate.update(new, "n")
836 self.dirstate.update(new, "n")
837 self.dirstate.forget(removed)
837 self.dirstate.forget(removed)
838
838
839 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
839 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
840 return n
840 return n
841
841
842 def walk(self, node=None, files=[], match=util.always, badmatch=None):
842 def walk(self, node=None, files=[], match=util.always, badmatch=None):
843 '''
843 '''
844 walk recursively through the directory tree or a given
844 walk recursively through the directory tree or a given
845 changeset, finding all files matched by the match
845 changeset, finding all files matched by the match
846 function
846 function
847
847
848 results are yielded in a tuple (src, filename), where src
848 results are yielded in a tuple (src, filename), where src
849 is one of:
849 is one of:
850 'f' the file was found in the directory tree
850 'f' the file was found in the directory tree
851 'm' the file was only in the dirstate and not in the tree
851 'm' the file was only in the dirstate and not in the tree
852 'b' file was not found and matched badmatch
852 'b' file was not found and matched badmatch
853 '''
853 '''
854
854
855 if node:
855 if node:
856 fdict = dict.fromkeys(files)
856 fdict = dict.fromkeys(files)
857 for fn in self.manifest.read(self.changelog.read(node)[0]):
857 for fn in self.manifest.read(self.changelog.read(node)[0]):
858 for ffn in fdict:
858 for ffn in fdict:
859 # match if the file is the exact name or a directory
859 # match if the file is the exact name or a directory
860 if ffn == fn or fn.startswith("%s/" % ffn):
860 if ffn == fn or fn.startswith("%s/" % ffn):
861 del fdict[ffn]
861 del fdict[ffn]
862 break
862 break
863 if match(fn):
863 if match(fn):
864 yield 'm', fn
864 yield 'm', fn
865 for fn in fdict:
865 for fn in fdict:
866 if badmatch and badmatch(fn):
866 if badmatch and badmatch(fn):
867 if match(fn):
867 if match(fn):
868 yield 'b', fn
868 yield 'b', fn
869 else:
869 else:
870 self.ui.warn(_('%s: No such file in rev %s\n') % (
870 self.ui.warn(_('%s: No such file in rev %s\n') % (
871 util.pathto(self.getcwd(), fn), short(node)))
871 util.pathto(self.getcwd(), fn), short(node)))
872 else:
872 else:
873 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
873 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
874 yield src, fn
874 yield src, fn
875
875
876 def status(self, node1=None, node2=None, files=[], match=util.always,
876 def status(self, node1=None, node2=None, files=[], match=util.always,
877 wlock=None, list_ignored=False, list_clean=False):
877 wlock=None, list_ignored=False, list_clean=False):
878 """return status of files between two nodes or node and working directory
878 """return status of files between two nodes or node and working directory
879
879
880 If node1 is None, use the first dirstate parent instead.
880 If node1 is None, use the first dirstate parent instead.
881 If node2 is None, compare node1 with working directory.
881 If node2 is None, compare node1 with working directory.
882 """
882 """
883
883
884 def fcmp(fn, mf):
884 def fcmp(fn, getnode):
885 t1 = self.wread(fn)
885 t1 = self.wread(fn)
886 return self.file(fn).cmp(mf.get(fn, nullid), t1)
886 return self.file(fn).cmp(getnode(fn), t1)
887
887
888 def mfmatches(node):
888 def mfmatches(node):
889 change = self.changelog.read(node)
889 change = self.changelog.read(node)
890 mf = self.manifest.read(change[0]).copy()
890 mf = self.manifest.read(change[0]).copy()
891 for fn in mf.keys():
891 for fn in mf.keys():
892 if not match(fn):
892 if not match(fn):
893 del mf[fn]
893 del mf[fn]
894 return mf
894 return mf
895
895
896 modified, added, removed, deleted, unknown = [], [], [], [], []
896 modified, added, removed, deleted, unknown = [], [], [], [], []
897 ignored, clean = [], []
897 ignored, clean = [], []
898
898
899 compareworking = False
899 compareworking = False
900 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
900 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
901 compareworking = True
901 compareworking = True
902
902
903 if not compareworking:
903 if not compareworking:
904 # read the manifest from node1 before the manifest from node2,
904 # read the manifest from node1 before the manifest from node2,
905 # so that we'll hit the manifest cache if we're going through
905 # so that we'll hit the manifest cache if we're going through
906 # all the revisions in parent->child order.
906 # all the revisions in parent->child order.
907 mf1 = mfmatches(node1)
907 mf1 = mfmatches(node1)
908
908
909 # are we comparing the working directory?
909 # are we comparing the working directory?
910 if not node2:
910 if not node2:
911 if not wlock:
911 if not wlock:
912 try:
912 try:
913 wlock = self.wlock(wait=0)
913 wlock = self.wlock(wait=0)
914 except lock.LockException:
914 except lock.LockException:
915 wlock = None
915 wlock = None
916 (lookup, modified, added, removed, deleted, unknown,
916 (lookup, modified, added, removed, deleted, unknown,
917 ignored, clean) = self.dirstate.status(files, match,
917 ignored, clean) = self.dirstate.status(files, match,
918 list_ignored, list_clean)
918 list_ignored, list_clean)
919
919
920 # are we comparing working dir against its parent?
920 # are we comparing working dir against its parent?
921 if compareworking:
921 if compareworking:
922 if lookup:
922 if lookup:
923 # do a full compare of any files that might have changed
923 # do a full compare of any files that might have changed
924 mf2 = mfmatches(self.dirstate.parents()[0])
924 mf2 = mfmatches(self.dirstate.parents()[0])
925 getnode = lambda fn: mf2.get(fn, nullid)
925 for f in lookup:
926 for f in lookup:
926 if fcmp(f, mf2):
927 if fcmp(f, getnode):
927 modified.append(f)
928 modified.append(f)
928 else:
929 else:
929 clean.append(f)
930 clean.append(f)
930 if wlock is not None:
931 if wlock is not None:
931 self.dirstate.update([f], "n")
932 self.dirstate.update([f], "n")
932 else:
933 else:
933 # we are comparing working dir against non-parent
934 # we are comparing working dir against non-parent
934 # generate a pseudo-manifest for the working dir
935 # generate a pseudo-manifest for the working dir
935 # XXX: create it in dirstate.py ?
936 # XXX: create it in dirstate.py ?
936 mf2 = mfmatches(self.dirstate.parents()[0])
937 mf2 = mfmatches(self.dirstate.parents()[0])
937 is_exec = util.execfunc(self.root, mf2.execf)
938 is_exec = util.execfunc(self.root, mf2.execf)
938 is_link = util.linkfunc(self.root, mf2.linkf)
939 is_link = util.linkfunc(self.root, mf2.linkf)
939 for f in lookup + modified + added:
940 for f in lookup + modified + added:
940 mf2[f] = ""
941 mf2[f] = ""
941 mf2.set(f, is_exec(f), is_link(f))
942 mf2.set(f, is_exec(f), is_link(f))
942 for f in removed:
943 for f in removed:
943 if f in mf2:
944 if f in mf2:
944 del mf2[f]
945 del mf2[f]
945 else:
946 else:
946 # we are comparing two revisions
947 # we are comparing two revisions
947 mf2 = mfmatches(node2)
948 mf2 = mfmatches(node2)
948
949
949 if not compareworking:
950 if not compareworking:
950 # flush lists from dirstate before comparing manifests
951 # flush lists from dirstate before comparing manifests
951 modified, added, clean = [], [], []
952 modified, added, clean = [], [], []
952
953
953 # make sure to sort the files so we talk to the disk in a
954 # make sure to sort the files so we talk to the disk in a
954 # reasonable order
955 # reasonable order
955 mf2keys = mf2.keys()
956 mf2keys = mf2.keys()
956 mf2keys.sort()
957 mf2keys.sort()
958 getnode = lambda fn: mf1.get(fn, nullid)
957 for fn in mf2keys:
959 for fn in mf2keys:
958 if mf1.has_key(fn):
960 if mf1.has_key(fn):
959 if mf1.flags(fn) != mf2.flags(fn) or \
961 if mf1.flags(fn) != mf2.flags(fn) or \
960 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
962 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
963 fcmp(fn, getnode))):
961 modified.append(fn)
964 modified.append(fn)
962 elif list_clean:
965 elif list_clean:
963 clean.append(fn)
966 clean.append(fn)
964 del mf1[fn]
967 del mf1[fn]
965 else:
968 else:
966 added.append(fn)
969 added.append(fn)
967
970
968 removed = mf1.keys()
971 removed = mf1.keys()
969
972
970 # sort and return results:
973 # sort and return results:
971 for l in modified, added, removed, deleted, unknown, ignored, clean:
974 for l in modified, added, removed, deleted, unknown, ignored, clean:
972 l.sort()
975 l.sort()
973 return (modified, added, removed, deleted, unknown, ignored, clean)
976 return (modified, added, removed, deleted, unknown, ignored, clean)
974
977
975 def add(self, list, wlock=None):
978 def add(self, list, wlock=None):
976 if not wlock:
979 if not wlock:
977 wlock = self.wlock()
980 wlock = self.wlock()
978 for f in list:
981 for f in list:
979 p = self.wjoin(f)
982 p = self.wjoin(f)
980 islink = os.path.islink(p)
983 islink = os.path.islink(p)
981 if not islink and not os.path.exists(p):
984 if not islink and not os.path.exists(p):
982 self.ui.warn(_("%s does not exist!\n") % f)
985 self.ui.warn(_("%s does not exist!\n") % f)
983 elif not islink and not os.path.isfile(p):
986 elif not islink and not os.path.isfile(p):
984 self.ui.warn(_("%s not added: only files and symlinks "
987 self.ui.warn(_("%s not added: only files and symlinks "
985 "supported currently\n") % f)
988 "supported currently\n") % f)
986 elif self.dirstate.state(f) in 'an':
989 elif self.dirstate.state(f) in 'an':
987 self.ui.warn(_("%s already tracked!\n") % f)
990 self.ui.warn(_("%s already tracked!\n") % f)
988 else:
991 else:
989 self.dirstate.update([f], "a")
992 self.dirstate.update([f], "a")
990
993
991 def forget(self, list, wlock=None):
994 def forget(self, list, wlock=None):
992 if not wlock:
995 if not wlock:
993 wlock = self.wlock()
996 wlock = self.wlock()
994 for f in list:
997 for f in list:
995 if self.dirstate.state(f) not in 'ai':
998 if self.dirstate.state(f) not in 'ai':
996 self.ui.warn(_("%s not added!\n") % f)
999 self.ui.warn(_("%s not added!\n") % f)
997 else:
1000 else:
998 self.dirstate.forget([f])
1001 self.dirstate.forget([f])
999
1002
1000 def remove(self, list, unlink=False, wlock=None):
1003 def remove(self, list, unlink=False, wlock=None):
1001 if unlink:
1004 if unlink:
1002 for f in list:
1005 for f in list:
1003 try:
1006 try:
1004 util.unlink(self.wjoin(f))
1007 util.unlink(self.wjoin(f))
1005 except OSError, inst:
1008 except OSError, inst:
1006 if inst.errno != errno.ENOENT:
1009 if inst.errno != errno.ENOENT:
1007 raise
1010 raise
1008 if not wlock:
1011 if not wlock:
1009 wlock = self.wlock()
1012 wlock = self.wlock()
1010 for f in list:
1013 for f in list:
1011 p = self.wjoin(f)
1014 p = self.wjoin(f)
1012 if os.path.exists(p):
1015 if os.path.exists(p):
1013 self.ui.warn(_("%s still exists!\n") % f)
1016 self.ui.warn(_("%s still exists!\n") % f)
1014 elif self.dirstate.state(f) == 'a':
1017 elif self.dirstate.state(f) == 'a':
1015 self.dirstate.forget([f])
1018 self.dirstate.forget([f])
1016 elif f not in self.dirstate:
1019 elif f not in self.dirstate:
1017 self.ui.warn(_("%s not tracked!\n") % f)
1020 self.ui.warn(_("%s not tracked!\n") % f)
1018 else:
1021 else:
1019 self.dirstate.update([f], "r")
1022 self.dirstate.update([f], "r")
1020
1023
1021 def undelete(self, list, wlock=None):
1024 def undelete(self, list, wlock=None):
1022 p = self.dirstate.parents()[0]
1025 p = self.dirstate.parents()[0]
1023 mn = self.changelog.read(p)[0]
1026 mn = self.changelog.read(p)[0]
1024 m = self.manifest.read(mn)
1027 m = self.manifest.read(mn)
1025 if not wlock:
1028 if not wlock:
1026 wlock = self.wlock()
1029 wlock = self.wlock()
1027 for f in list:
1030 for f in list:
1028 if self.dirstate.state(f) not in "r":
1031 if self.dirstate.state(f) not in "r":
1029 self.ui.warn("%s not removed!\n" % f)
1032 self.ui.warn("%s not removed!\n" % f)
1030 else:
1033 else:
1031 t = self.file(f).read(m[f])
1034 t = self.file(f).read(m[f])
1032 self.wwrite(f, t, m.flags(f))
1035 self.wwrite(f, t, m.flags(f))
1033 self.dirstate.update([f], "n")
1036 self.dirstate.update([f], "n")
1034
1037
1035 def copy(self, source, dest, wlock=None):
1038 def copy(self, source, dest, wlock=None):
1036 p = self.wjoin(dest)
1039 p = self.wjoin(dest)
1037 if not os.path.exists(p):
1040 if not os.path.exists(p):
1038 self.ui.warn(_("%s does not exist!\n") % dest)
1041 self.ui.warn(_("%s does not exist!\n") % dest)
1039 elif not os.path.isfile(p):
1042 elif not os.path.isfile(p):
1040 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1043 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1041 else:
1044 else:
1042 if not wlock:
1045 if not wlock:
1043 wlock = self.wlock()
1046 wlock = self.wlock()
1044 if self.dirstate.state(dest) == '?':
1047 if self.dirstate.state(dest) == '?':
1045 self.dirstate.update([dest], "a")
1048 self.dirstate.update([dest], "a")
1046 self.dirstate.copy(source, dest)
1049 self.dirstate.copy(source, dest)
1047
1050
1048 def heads(self, start=None):
1051 def heads(self, start=None):
1049 heads = self.changelog.heads(start)
1052 heads = self.changelog.heads(start)
1050 # sort the output in rev descending order
1053 # sort the output in rev descending order
1051 heads = [(-self.changelog.rev(h), h) for h in heads]
1054 heads = [(-self.changelog.rev(h), h) for h in heads]
1052 heads.sort()
1055 heads.sort()
1053 return [n for (r, n) in heads]
1056 return [n for (r, n) in heads]
1054
1057
1055 def branches(self, nodes):
1058 def branches(self, nodes):
1056 if not nodes:
1059 if not nodes:
1057 nodes = [self.changelog.tip()]
1060 nodes = [self.changelog.tip()]
1058 b = []
1061 b = []
1059 for n in nodes:
1062 for n in nodes:
1060 t = n
1063 t = n
1061 while 1:
1064 while 1:
1062 p = self.changelog.parents(n)
1065 p = self.changelog.parents(n)
1063 if p[1] != nullid or p[0] == nullid:
1066 if p[1] != nullid or p[0] == nullid:
1064 b.append((t, n, p[0], p[1]))
1067 b.append((t, n, p[0], p[1]))
1065 break
1068 break
1066 n = p[0]
1069 n = p[0]
1067 return b
1070 return b
1068
1071
1069 def between(self, pairs):
1072 def between(self, pairs):
1070 r = []
1073 r = []
1071
1074
1072 for top, bottom in pairs:
1075 for top, bottom in pairs:
1073 n, l, i = top, [], 0
1076 n, l, i = top, [], 0
1074 f = 1
1077 f = 1
1075
1078
1076 while n != bottom:
1079 while n != bottom:
1077 p = self.changelog.parents(n)[0]
1080 p = self.changelog.parents(n)[0]
1078 if i == f:
1081 if i == f:
1079 l.append(n)
1082 l.append(n)
1080 f = f * 2
1083 f = f * 2
1081 n = p
1084 n = p
1082 i += 1
1085 i += 1
1083
1086
1084 r.append(l)
1087 r.append(l)
1085
1088
1086 return r
1089 return r
1087
1090
1088 def findincoming(self, remote, base=None, heads=None, force=False):
1091 def findincoming(self, remote, base=None, heads=None, force=False):
1089 """Return list of roots of the subsets of missing nodes from remote
1092 """Return list of roots of the subsets of missing nodes from remote
1090
1093
1091 If base dict is specified, assume that these nodes and their parents
1094 If base dict is specified, assume that these nodes and their parents
1092 exist on the remote side and that no child of a node of base exists
1095 exist on the remote side and that no child of a node of base exists
1093 in both remote and self.
1096 in both remote and self.
1094 Furthermore base will be updated to include the nodes that exists
1097 Furthermore base will be updated to include the nodes that exists
1095 in self and remote but no children exists in self and remote.
1098 in self and remote but no children exists in self and remote.
1096 If a list of heads is specified, return only nodes which are heads
1099 If a list of heads is specified, return only nodes which are heads
1097 or ancestors of these heads.
1100 or ancestors of these heads.
1098
1101
1099 All the ancestors of base are in self and in remote.
1102 All the ancestors of base are in self and in remote.
1100 All the descendants of the list returned are missing in self.
1103 All the descendants of the list returned are missing in self.
1101 (and so we know that the rest of the nodes are missing in remote, see
1104 (and so we know that the rest of the nodes are missing in remote, see
1102 outgoing)
1105 outgoing)
1103 """
1106 """
1104 m = self.changelog.nodemap
1107 m = self.changelog.nodemap
1105 search = []
1108 search = []
1106 fetch = {}
1109 fetch = {}
1107 seen = {}
1110 seen = {}
1108 seenbranch = {}
1111 seenbranch = {}
1109 if base == None:
1112 if base == None:
1110 base = {}
1113 base = {}
1111
1114
1112 if not heads:
1115 if not heads:
1113 heads = remote.heads()
1116 heads = remote.heads()
1114
1117
1115 if self.changelog.tip() == nullid:
1118 if self.changelog.tip() == nullid:
1116 base[nullid] = 1
1119 base[nullid] = 1
1117 if heads != [nullid]:
1120 if heads != [nullid]:
1118 return [nullid]
1121 return [nullid]
1119 return []
1122 return []
1120
1123
1121 # assume we're closer to the tip than the root
1124 # assume we're closer to the tip than the root
1122 # and start by examining the heads
1125 # and start by examining the heads
1123 self.ui.status(_("searching for changes\n"))
1126 self.ui.status(_("searching for changes\n"))
1124
1127
1125 unknown = []
1128 unknown = []
1126 for h in heads:
1129 for h in heads:
1127 if h not in m:
1130 if h not in m:
1128 unknown.append(h)
1131 unknown.append(h)
1129 else:
1132 else:
1130 base[h] = 1
1133 base[h] = 1
1131
1134
1132 if not unknown:
1135 if not unknown:
1133 return []
1136 return []
1134
1137
1135 req = dict.fromkeys(unknown)
1138 req = dict.fromkeys(unknown)
1136 reqcnt = 0
1139 reqcnt = 0
1137
1140
1138 # search through remote branches
1141 # search through remote branches
1139 # a 'branch' here is a linear segment of history, with four parts:
1142 # a 'branch' here is a linear segment of history, with four parts:
1140 # head, root, first parent, second parent
1143 # head, root, first parent, second parent
1141 # (a branch always has two parents (or none) by definition)
1144 # (a branch always has two parents (or none) by definition)
1142 unknown = remote.branches(unknown)
1145 unknown = remote.branches(unknown)
1143 while unknown:
1146 while unknown:
1144 r = []
1147 r = []
1145 while unknown:
1148 while unknown:
1146 n = unknown.pop(0)
1149 n = unknown.pop(0)
1147 if n[0] in seen:
1150 if n[0] in seen:
1148 continue
1151 continue
1149
1152
1150 self.ui.debug(_("examining %s:%s\n")
1153 self.ui.debug(_("examining %s:%s\n")
1151 % (short(n[0]), short(n[1])))
1154 % (short(n[0]), short(n[1])))
1152 if n[0] == nullid: # found the end of the branch
1155 if n[0] == nullid: # found the end of the branch
1153 pass
1156 pass
1154 elif n in seenbranch:
1157 elif n in seenbranch:
1155 self.ui.debug(_("branch already found\n"))
1158 self.ui.debug(_("branch already found\n"))
1156 continue
1159 continue
1157 elif n[1] and n[1] in m: # do we know the base?
1160 elif n[1] and n[1] in m: # do we know the base?
1158 self.ui.debug(_("found incomplete branch %s:%s\n")
1161 self.ui.debug(_("found incomplete branch %s:%s\n")
1159 % (short(n[0]), short(n[1])))
1162 % (short(n[0]), short(n[1])))
1160 search.append(n) # schedule branch range for scanning
1163 search.append(n) # schedule branch range for scanning
1161 seenbranch[n] = 1
1164 seenbranch[n] = 1
1162 else:
1165 else:
1163 if n[1] not in seen and n[1] not in fetch:
1166 if n[1] not in seen and n[1] not in fetch:
1164 if n[2] in m and n[3] in m:
1167 if n[2] in m and n[3] in m:
1165 self.ui.debug(_("found new changeset %s\n") %
1168 self.ui.debug(_("found new changeset %s\n") %
1166 short(n[1]))
1169 short(n[1]))
1167 fetch[n[1]] = 1 # earliest unknown
1170 fetch[n[1]] = 1 # earliest unknown
1168 for p in n[2:4]:
1171 for p in n[2:4]:
1169 if p in m:
1172 if p in m:
1170 base[p] = 1 # latest known
1173 base[p] = 1 # latest known
1171
1174
1172 for p in n[2:4]:
1175 for p in n[2:4]:
1173 if p not in req and p not in m:
1176 if p not in req and p not in m:
1174 r.append(p)
1177 r.append(p)
1175 req[p] = 1
1178 req[p] = 1
1176 seen[n[0]] = 1
1179 seen[n[0]] = 1
1177
1180
1178 if r:
1181 if r:
1179 reqcnt += 1
1182 reqcnt += 1
1180 self.ui.debug(_("request %d: %s\n") %
1183 self.ui.debug(_("request %d: %s\n") %
1181 (reqcnt, " ".join(map(short, r))))
1184 (reqcnt, " ".join(map(short, r))))
1182 for p in xrange(0, len(r), 10):
1185 for p in xrange(0, len(r), 10):
1183 for b in remote.branches(r[p:p+10]):
1186 for b in remote.branches(r[p:p+10]):
1184 self.ui.debug(_("received %s:%s\n") %
1187 self.ui.debug(_("received %s:%s\n") %
1185 (short(b[0]), short(b[1])))
1188 (short(b[0]), short(b[1])))
1186 unknown.append(b)
1189 unknown.append(b)
1187
1190
1188 # do binary search on the branches we found
1191 # do binary search on the branches we found
1189 while search:
1192 while search:
1190 n = search.pop(0)
1193 n = search.pop(0)
1191 reqcnt += 1
1194 reqcnt += 1
1192 l = remote.between([(n[0], n[1])])[0]
1195 l = remote.between([(n[0], n[1])])[0]
1193 l.append(n[1])
1196 l.append(n[1])
1194 p = n[0]
1197 p = n[0]
1195 f = 1
1198 f = 1
1196 for i in l:
1199 for i in l:
1197 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1200 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1198 if i in m:
1201 if i in m:
1199 if f <= 2:
1202 if f <= 2:
1200 self.ui.debug(_("found new branch changeset %s\n") %
1203 self.ui.debug(_("found new branch changeset %s\n") %
1201 short(p))
1204 short(p))
1202 fetch[p] = 1
1205 fetch[p] = 1
1203 base[i] = 1
1206 base[i] = 1
1204 else:
1207 else:
1205 self.ui.debug(_("narrowed branch search to %s:%s\n")
1208 self.ui.debug(_("narrowed branch search to %s:%s\n")
1206 % (short(p), short(i)))
1209 % (short(p), short(i)))
1207 search.append((p, i))
1210 search.append((p, i))
1208 break
1211 break
1209 p, f = i, f * 2
1212 p, f = i, f * 2
1210
1213
1211 # sanity check our fetch list
1214 # sanity check our fetch list
1212 for f in fetch.keys():
1215 for f in fetch.keys():
1213 if f in m:
1216 if f in m:
1214 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1217 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1215
1218
1216 if base.keys() == [nullid]:
1219 if base.keys() == [nullid]:
1217 if force:
1220 if force:
1218 self.ui.warn(_("warning: repository is unrelated\n"))
1221 self.ui.warn(_("warning: repository is unrelated\n"))
1219 else:
1222 else:
1220 raise util.Abort(_("repository is unrelated"))
1223 raise util.Abort(_("repository is unrelated"))
1221
1224
1222 self.ui.debug(_("found new changesets starting at ") +
1225 self.ui.debug(_("found new changesets starting at ") +
1223 " ".join([short(f) for f in fetch]) + "\n")
1226 " ".join([short(f) for f in fetch]) + "\n")
1224
1227
1225 self.ui.debug(_("%d total queries\n") % reqcnt)
1228 self.ui.debug(_("%d total queries\n") % reqcnt)
1226
1229
1227 return fetch.keys()
1230 return fetch.keys()
1228
1231
1229 def findoutgoing(self, remote, base=None, heads=None, force=False):
1232 def findoutgoing(self, remote, base=None, heads=None, force=False):
1230 """Return list of nodes that are roots of subsets not in remote
1233 """Return list of nodes that are roots of subsets not in remote
1231
1234
1232 If base dict is specified, assume that these nodes and their parents
1235 If base dict is specified, assume that these nodes and their parents
1233 exist on the remote side.
1236 exist on the remote side.
1234 If a list of heads is specified, return only nodes which are heads
1237 If a list of heads is specified, return only nodes which are heads
1235 or ancestors of these heads, and return a second element which
1238 or ancestors of these heads, and return a second element which
1236 contains all remote heads which get new children.
1239 contains all remote heads which get new children.
1237 """
1240 """
1238 if base == None:
1241 if base == None:
1239 base = {}
1242 base = {}
1240 self.findincoming(remote, base, heads, force=force)
1243 self.findincoming(remote, base, heads, force=force)
1241
1244
1242 self.ui.debug(_("common changesets up to ")
1245 self.ui.debug(_("common changesets up to ")
1243 + " ".join(map(short, base.keys())) + "\n")
1246 + " ".join(map(short, base.keys())) + "\n")
1244
1247
1245 remain = dict.fromkeys(self.changelog.nodemap)
1248 remain = dict.fromkeys(self.changelog.nodemap)
1246
1249
1247 # prune everything remote has from the tree
1250 # prune everything remote has from the tree
1248 del remain[nullid]
1251 del remain[nullid]
1249 remove = base.keys()
1252 remove = base.keys()
1250 while remove:
1253 while remove:
1251 n = remove.pop(0)
1254 n = remove.pop(0)
1252 if n in remain:
1255 if n in remain:
1253 del remain[n]
1256 del remain[n]
1254 for p in self.changelog.parents(n):
1257 for p in self.changelog.parents(n):
1255 remove.append(p)
1258 remove.append(p)
1256
1259
1257 # find every node whose parents have been pruned
1260 # find every node whose parents have been pruned
1258 subset = []
1261 subset = []
1259 # find every remote head that will get new children
1262 # find every remote head that will get new children
1260 updated_heads = {}
1263 updated_heads = {}
1261 for n in remain:
1264 for n in remain:
1262 p1, p2 = self.changelog.parents(n)
1265 p1, p2 = self.changelog.parents(n)
1263 if p1 not in remain and p2 not in remain:
1266 if p1 not in remain and p2 not in remain:
1264 subset.append(n)
1267 subset.append(n)
1265 if heads:
1268 if heads:
1266 if p1 in heads:
1269 if p1 in heads:
1267 updated_heads[p1] = True
1270 updated_heads[p1] = True
1268 if p2 in heads:
1271 if p2 in heads:
1269 updated_heads[p2] = True
1272 updated_heads[p2] = True
1270
1273
1271 # this is the set of all roots we have to push
1274 # this is the set of all roots we have to push
1272 if heads:
1275 if heads:
1273 return subset, updated_heads.keys()
1276 return subset, updated_heads.keys()
1274 else:
1277 else:
1275 return subset
1278 return subset
1276
1279
1277 def pull(self, remote, heads=None, force=False, lock=None):
1280 def pull(self, remote, heads=None, force=False, lock=None):
1278 mylock = False
1281 mylock = False
1279 if not lock:
1282 if not lock:
1280 lock = self.lock()
1283 lock = self.lock()
1281 mylock = True
1284 mylock = True
1282
1285
1283 try:
1286 try:
1284 fetch = self.findincoming(remote, force=force)
1287 fetch = self.findincoming(remote, force=force)
1285 if fetch == [nullid]:
1288 if fetch == [nullid]:
1286 self.ui.status(_("requesting all changes\n"))
1289 self.ui.status(_("requesting all changes\n"))
1287
1290
1288 if not fetch:
1291 if not fetch:
1289 self.ui.status(_("no changes found\n"))
1292 self.ui.status(_("no changes found\n"))
1290 return 0
1293 return 0
1291
1294
1292 if heads is None:
1295 if heads is None:
1293 cg = remote.changegroup(fetch, 'pull')
1296 cg = remote.changegroup(fetch, 'pull')
1294 else:
1297 else:
1295 if 'changegroupsubset' not in remote.capabilities:
1298 if 'changegroupsubset' not in remote.capabilities:
1296 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1299 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1297 cg = remote.changegroupsubset(fetch, heads, 'pull')
1300 cg = remote.changegroupsubset(fetch, heads, 'pull')
1298 return self.addchangegroup(cg, 'pull', remote.url())
1301 return self.addchangegroup(cg, 'pull', remote.url())
1299 finally:
1302 finally:
1300 if mylock:
1303 if mylock:
1301 lock.release()
1304 lock.release()
1302
1305
1303 def push(self, remote, force=False, revs=None):
1306 def push(self, remote, force=False, revs=None):
1304 # there are two ways to push to remote repo:
1307 # there are two ways to push to remote repo:
1305 #
1308 #
1306 # addchangegroup assumes local user can lock remote
1309 # addchangegroup assumes local user can lock remote
1307 # repo (local filesystem, old ssh servers).
1310 # repo (local filesystem, old ssh servers).
1308 #
1311 #
1309 # unbundle assumes local user cannot lock remote repo (new ssh
1312 # unbundle assumes local user cannot lock remote repo (new ssh
1310 # servers, http servers).
1313 # servers, http servers).
1311
1314
1312 if remote.capable('unbundle'):
1315 if remote.capable('unbundle'):
1313 return self.push_unbundle(remote, force, revs)
1316 return self.push_unbundle(remote, force, revs)
1314 return self.push_addchangegroup(remote, force, revs)
1317 return self.push_addchangegroup(remote, force, revs)
1315
1318
1316 def prepush(self, remote, force, revs):
1319 def prepush(self, remote, force, revs):
1317 base = {}
1320 base = {}
1318 remote_heads = remote.heads()
1321 remote_heads = remote.heads()
1319 inc = self.findincoming(remote, base, remote_heads, force=force)
1322 inc = self.findincoming(remote, base, remote_heads, force=force)
1320
1323
1321 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1324 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1322 if revs is not None:
1325 if revs is not None:
1323 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1326 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1324 else:
1327 else:
1325 bases, heads = update, self.changelog.heads()
1328 bases, heads = update, self.changelog.heads()
1326
1329
1327 if not bases:
1330 if not bases:
1328 self.ui.status(_("no changes found\n"))
1331 self.ui.status(_("no changes found\n"))
1329 return None, 1
1332 return None, 1
1330 elif not force:
1333 elif not force:
1331 # check if we're creating new remote heads
1334 # check if we're creating new remote heads
1332 # to be a remote head after push, node must be either
1335 # to be a remote head after push, node must be either
1333 # - unknown locally
1336 # - unknown locally
1334 # - a local outgoing head descended from update
1337 # - a local outgoing head descended from update
1335 # - a remote head that's known locally and not
1338 # - a remote head that's known locally and not
1336 # ancestral to an outgoing head
1339 # ancestral to an outgoing head
1337
1340
1338 warn = 0
1341 warn = 0
1339
1342
1340 if remote_heads == [nullid]:
1343 if remote_heads == [nullid]:
1341 warn = 0
1344 warn = 0
1342 elif not revs and len(heads) > len(remote_heads):
1345 elif not revs and len(heads) > len(remote_heads):
1343 warn = 1
1346 warn = 1
1344 else:
1347 else:
1345 newheads = list(heads)
1348 newheads = list(heads)
1346 for r in remote_heads:
1349 for r in remote_heads:
1347 if r in self.changelog.nodemap:
1350 if r in self.changelog.nodemap:
1348 desc = self.changelog.heads(r, heads)
1351 desc = self.changelog.heads(r, heads)
1349 l = [h for h in heads if h in desc]
1352 l = [h for h in heads if h in desc]
1350 if not l:
1353 if not l:
1351 newheads.append(r)
1354 newheads.append(r)
1352 else:
1355 else:
1353 newheads.append(r)
1356 newheads.append(r)
1354 if len(newheads) > len(remote_heads):
1357 if len(newheads) > len(remote_heads):
1355 warn = 1
1358 warn = 1
1356
1359
1357 if warn:
1360 if warn:
1358 self.ui.warn(_("abort: push creates new remote branches!\n"))
1361 self.ui.warn(_("abort: push creates new remote branches!\n"))
1359 self.ui.status(_("(did you forget to merge?"
1362 self.ui.status(_("(did you forget to merge?"
1360 " use push -f to force)\n"))
1363 " use push -f to force)\n"))
1361 return None, 1
1364 return None, 1
1362 elif inc:
1365 elif inc:
1363 self.ui.warn(_("note: unsynced remote changes!\n"))
1366 self.ui.warn(_("note: unsynced remote changes!\n"))
1364
1367
1365
1368
1366 if revs is None:
1369 if revs is None:
1367 cg = self.changegroup(update, 'push')
1370 cg = self.changegroup(update, 'push')
1368 else:
1371 else:
1369 cg = self.changegroupsubset(update, revs, 'push')
1372 cg = self.changegroupsubset(update, revs, 'push')
1370 return cg, remote_heads
1373 return cg, remote_heads
1371
1374
1372 def push_addchangegroup(self, remote, force, revs):
1375 def push_addchangegroup(self, remote, force, revs):
1373 lock = remote.lock()
1376 lock = remote.lock()
1374
1377
1375 ret = self.prepush(remote, force, revs)
1378 ret = self.prepush(remote, force, revs)
1376 if ret[0] is not None:
1379 if ret[0] is not None:
1377 cg, remote_heads = ret
1380 cg, remote_heads = ret
1378 return remote.addchangegroup(cg, 'push', self.url())
1381 return remote.addchangegroup(cg, 'push', self.url())
1379 return ret[1]
1382 return ret[1]
1380
1383
1381 def push_unbundle(self, remote, force, revs):
1384 def push_unbundle(self, remote, force, revs):
1382 # local repo finds heads on server, finds out what revs it
1385 # local repo finds heads on server, finds out what revs it
1383 # must push. once revs transferred, if server finds it has
1386 # must push. once revs transferred, if server finds it has
1384 # different heads (someone else won commit/push race), server
1387 # different heads (someone else won commit/push race), server
1385 # aborts.
1388 # aborts.
1386
1389
1387 ret = self.prepush(remote, force, revs)
1390 ret = self.prepush(remote, force, revs)
1388 if ret[0] is not None:
1391 if ret[0] is not None:
1389 cg, remote_heads = ret
1392 cg, remote_heads = ret
1390 if force: remote_heads = ['force']
1393 if force: remote_heads = ['force']
1391 return remote.unbundle(cg, remote_heads, 'push')
1394 return remote.unbundle(cg, remote_heads, 'push')
1392 return ret[1]
1395 return ret[1]
1393
1396
1394 def changegroupinfo(self, nodes):
1397 def changegroupinfo(self, nodes):
1395 self.ui.note(_("%d changesets found\n") % len(nodes))
1398 self.ui.note(_("%d changesets found\n") % len(nodes))
1396 if self.ui.debugflag:
1399 if self.ui.debugflag:
1397 self.ui.debug(_("List of changesets:\n"))
1400 self.ui.debug(_("List of changesets:\n"))
1398 for node in nodes:
1401 for node in nodes:
1399 self.ui.debug("%s\n" % hex(node))
1402 self.ui.debug("%s\n" % hex(node))
1400
1403
1401 def changegroupsubset(self, bases, heads, source):
1404 def changegroupsubset(self, bases, heads, source):
1402 """This function generates a changegroup consisting of all the nodes
1405 """This function generates a changegroup consisting of all the nodes
1403 that are descendents of any of the bases, and ancestors of any of
1406 that are descendents of any of the bases, and ancestors of any of
1404 the heads.
1407 the heads.
1405
1408
1406 It is fairly complex as determining which filenodes and which
1409 It is fairly complex as determining which filenodes and which
1407 manifest nodes need to be included for the changeset to be complete
1410 manifest nodes need to be included for the changeset to be complete
1408 is non-trivial.
1411 is non-trivial.
1409
1412
1410 Another wrinkle is doing the reverse, figuring out which changeset in
1413 Another wrinkle is doing the reverse, figuring out which changeset in
1411 the changegroup a particular filenode or manifestnode belongs to."""
1414 the changegroup a particular filenode or manifestnode belongs to."""
1412
1415
1413 self.hook('preoutgoing', throw=True, source=source)
1416 self.hook('preoutgoing', throw=True, source=source)
1414
1417
1415 # Set up some initial variables
1418 # Set up some initial variables
1416 # Make it easy to refer to self.changelog
1419 # Make it easy to refer to self.changelog
1417 cl = self.changelog
1420 cl = self.changelog
1418 # msng is short for missing - compute the list of changesets in this
1421 # msng is short for missing - compute the list of changesets in this
1419 # changegroup.
1422 # changegroup.
1420 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1423 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1421 self.changegroupinfo(msng_cl_lst)
1424 self.changegroupinfo(msng_cl_lst)
1422 # Some bases may turn out to be superfluous, and some heads may be
1425 # Some bases may turn out to be superfluous, and some heads may be
1423 # too. nodesbetween will return the minimal set of bases and heads
1426 # too. nodesbetween will return the minimal set of bases and heads
1424 # necessary to re-create the changegroup.
1427 # necessary to re-create the changegroup.
1425
1428
1426 # Known heads are the list of heads that it is assumed the recipient
1429 # Known heads are the list of heads that it is assumed the recipient
1427 # of this changegroup will know about.
1430 # of this changegroup will know about.
1428 knownheads = {}
1431 knownheads = {}
1429 # We assume that all parents of bases are known heads.
1432 # We assume that all parents of bases are known heads.
1430 for n in bases:
1433 for n in bases:
1431 for p in cl.parents(n):
1434 for p in cl.parents(n):
1432 if p != nullid:
1435 if p != nullid:
1433 knownheads[p] = 1
1436 knownheads[p] = 1
1434 knownheads = knownheads.keys()
1437 knownheads = knownheads.keys()
1435 if knownheads:
1438 if knownheads:
1436 # Now that we know what heads are known, we can compute which
1439 # Now that we know what heads are known, we can compute which
1437 # changesets are known. The recipient must know about all
1440 # changesets are known. The recipient must know about all
1438 # changesets required to reach the known heads from the null
1441 # changesets required to reach the known heads from the null
1439 # changeset.
1442 # changeset.
1440 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1443 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1441 junk = None
1444 junk = None
1442 # Transform the list into an ersatz set.
1445 # Transform the list into an ersatz set.
1443 has_cl_set = dict.fromkeys(has_cl_set)
1446 has_cl_set = dict.fromkeys(has_cl_set)
1444 else:
1447 else:
1445 # If there were no known heads, the recipient cannot be assumed to
1448 # If there were no known heads, the recipient cannot be assumed to
1446 # know about any changesets.
1449 # know about any changesets.
1447 has_cl_set = {}
1450 has_cl_set = {}
1448
1451
1449 # Make it easy to refer to self.manifest
1452 # Make it easy to refer to self.manifest
1450 mnfst = self.manifest
1453 mnfst = self.manifest
1451 # We don't know which manifests are missing yet
1454 # We don't know which manifests are missing yet
1452 msng_mnfst_set = {}
1455 msng_mnfst_set = {}
1453 # Nor do we know which filenodes are missing.
1456 # Nor do we know which filenodes are missing.
1454 msng_filenode_set = {}
1457 msng_filenode_set = {}
1455
1458
1456 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1459 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1457 junk = None
1460 junk = None
1458
1461
1459 # A changeset always belongs to itself, so the changenode lookup
1462 # A changeset always belongs to itself, so the changenode lookup
1460 # function for a changenode is identity.
1463 # function for a changenode is identity.
1461 def identity(x):
1464 def identity(x):
1462 return x
1465 return x
1463
1466
1464 # A function generating function. Sets up an environment for the
1467 # A function generating function. Sets up an environment for the
1465 # inner function.
1468 # inner function.
1466 def cmp_by_rev_func(revlog):
1469 def cmp_by_rev_func(revlog):
1467 # Compare two nodes by their revision number in the environment's
1470 # Compare two nodes by their revision number in the environment's
1468 # revision history. Since the revision number both represents the
1471 # revision history. Since the revision number both represents the
1469 # most efficient order to read the nodes in, and represents a
1472 # most efficient order to read the nodes in, and represents a
1470 # topological sorting of the nodes, this function is often useful.
1473 # topological sorting of the nodes, this function is often useful.
1471 def cmp_by_rev(a, b):
1474 def cmp_by_rev(a, b):
1472 return cmp(revlog.rev(a), revlog.rev(b))
1475 return cmp(revlog.rev(a), revlog.rev(b))
1473 return cmp_by_rev
1476 return cmp_by_rev
1474
1477
1475 # If we determine that a particular file or manifest node must be a
1478 # If we determine that a particular file or manifest node must be a
1476 # node that the recipient of the changegroup will already have, we can
1479 # node that the recipient of the changegroup will already have, we can
1477 # also assume the recipient will have all the parents. This function
1480 # also assume the recipient will have all the parents. This function
1478 # prunes them from the set of missing nodes.
1481 # prunes them from the set of missing nodes.
1479 def prune_parents(revlog, hasset, msngset):
1482 def prune_parents(revlog, hasset, msngset):
1480 haslst = hasset.keys()
1483 haslst = hasset.keys()
1481 haslst.sort(cmp_by_rev_func(revlog))
1484 haslst.sort(cmp_by_rev_func(revlog))
1482 for node in haslst:
1485 for node in haslst:
1483 parentlst = [p for p in revlog.parents(node) if p != nullid]
1486 parentlst = [p for p in revlog.parents(node) if p != nullid]
1484 while parentlst:
1487 while parentlst:
1485 n = parentlst.pop()
1488 n = parentlst.pop()
1486 if n not in hasset:
1489 if n not in hasset:
1487 hasset[n] = 1
1490 hasset[n] = 1
1488 p = [p for p in revlog.parents(n) if p != nullid]
1491 p = [p for p in revlog.parents(n) if p != nullid]
1489 parentlst.extend(p)
1492 parentlst.extend(p)
1490 for n in hasset:
1493 for n in hasset:
1491 msngset.pop(n, None)
1494 msngset.pop(n, None)
1492
1495
1493 # This is a function generating function used to set up an environment
1496 # This is a function generating function used to set up an environment
1494 # for the inner function to execute in.
1497 # for the inner function to execute in.
1495 def manifest_and_file_collector(changedfileset):
1498 def manifest_and_file_collector(changedfileset):
1496 # This is an information gathering function that gathers
1499 # This is an information gathering function that gathers
1497 # information from each changeset node that goes out as part of
1500 # information from each changeset node that goes out as part of
1498 # the changegroup. The information gathered is a list of which
1501 # the changegroup. The information gathered is a list of which
1499 # manifest nodes are potentially required (the recipient may
1502 # manifest nodes are potentially required (the recipient may
1500 # already have them) and total list of all files which were
1503 # already have them) and total list of all files which were
1501 # changed in any changeset in the changegroup.
1504 # changed in any changeset in the changegroup.
1502 #
1505 #
1503 # We also remember the first changenode we saw any manifest
1506 # We also remember the first changenode we saw any manifest
1504 # referenced by so we can later determine which changenode 'owns'
1507 # referenced by so we can later determine which changenode 'owns'
1505 # the manifest.
1508 # the manifest.
1506 def collect_manifests_and_files(clnode):
1509 def collect_manifests_and_files(clnode):
1507 c = cl.read(clnode)
1510 c = cl.read(clnode)
1508 for f in c[3]:
1511 for f in c[3]:
1509 # This is to make sure we only have one instance of each
1512 # This is to make sure we only have one instance of each
1510 # filename string for each filename.
1513 # filename string for each filename.
1511 changedfileset.setdefault(f, f)
1514 changedfileset.setdefault(f, f)
1512 msng_mnfst_set.setdefault(c[0], clnode)
1515 msng_mnfst_set.setdefault(c[0], clnode)
1513 return collect_manifests_and_files
1516 return collect_manifests_and_files
1514
1517
1515 # Figure out which manifest nodes (of the ones we think might be part
1518 # Figure out which manifest nodes (of the ones we think might be part
1516 # of the changegroup) the recipient must know about and remove them
1519 # of the changegroup) the recipient must know about and remove them
1517 # from the changegroup.
1520 # from the changegroup.
1518 def prune_manifests():
1521 def prune_manifests():
1519 has_mnfst_set = {}
1522 has_mnfst_set = {}
1520 for n in msng_mnfst_set:
1523 for n in msng_mnfst_set:
1521 # If a 'missing' manifest thinks it belongs to a changenode
1524 # If a 'missing' manifest thinks it belongs to a changenode
1522 # the recipient is assumed to have, obviously the recipient
1525 # the recipient is assumed to have, obviously the recipient
1523 # must have that manifest.
1526 # must have that manifest.
1524 linknode = cl.node(mnfst.linkrev(n))
1527 linknode = cl.node(mnfst.linkrev(n))
1525 if linknode in has_cl_set:
1528 if linknode in has_cl_set:
1526 has_mnfst_set[n] = 1
1529 has_mnfst_set[n] = 1
1527 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1530 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1528
1531
1529 # Use the information collected in collect_manifests_and_files to say
1532 # Use the information collected in collect_manifests_and_files to say
1530 # which changenode any manifestnode belongs to.
1533 # which changenode any manifestnode belongs to.
1531 def lookup_manifest_link(mnfstnode):
1534 def lookup_manifest_link(mnfstnode):
1532 return msng_mnfst_set[mnfstnode]
1535 return msng_mnfst_set[mnfstnode]
1533
1536
1534 # A function generating function that sets up the initial environment
1537 # A function generating function that sets up the initial environment
1535 # the inner function.
1538 # the inner function.
1536 def filenode_collector(changedfiles):
1539 def filenode_collector(changedfiles):
1537 next_rev = [0]
1540 next_rev = [0]
1538 # This gathers information from each manifestnode included in the
1541 # This gathers information from each manifestnode included in the
1539 # changegroup about which filenodes the manifest node references
1542 # changegroup about which filenodes the manifest node references
1540 # so we can include those in the changegroup too.
1543 # so we can include those in the changegroup too.
1541 #
1544 #
1542 # It also remembers which changenode each filenode belongs to. It
1545 # It also remembers which changenode each filenode belongs to. It
1543 # does this by assuming the a filenode belongs to the changenode
1546 # does this by assuming the a filenode belongs to the changenode
1544 # the first manifest that references it belongs to.
1547 # the first manifest that references it belongs to.
1545 def collect_msng_filenodes(mnfstnode):
1548 def collect_msng_filenodes(mnfstnode):
1546 r = mnfst.rev(mnfstnode)
1549 r = mnfst.rev(mnfstnode)
1547 if r == next_rev[0]:
1550 if r == next_rev[0]:
1548 # If the last rev we looked at was the one just previous,
1551 # If the last rev we looked at was the one just previous,
1549 # we only need to see a diff.
1552 # we only need to see a diff.
1550 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1553 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1551 # For each line in the delta
1554 # For each line in the delta
1552 for dline in delta.splitlines():
1555 for dline in delta.splitlines():
1553 # get the filename and filenode for that line
1556 # get the filename and filenode for that line
1554 f, fnode = dline.split('\0')
1557 f, fnode = dline.split('\0')
1555 fnode = bin(fnode[:40])
1558 fnode = bin(fnode[:40])
1556 f = changedfiles.get(f, None)
1559 f = changedfiles.get(f, None)
1557 # And if the file is in the list of files we care
1560 # And if the file is in the list of files we care
1558 # about.
1561 # about.
1559 if f is not None:
1562 if f is not None:
1560 # Get the changenode this manifest belongs to
1563 # Get the changenode this manifest belongs to
1561 clnode = msng_mnfst_set[mnfstnode]
1564 clnode = msng_mnfst_set[mnfstnode]
1562 # Create the set of filenodes for the file if
1565 # Create the set of filenodes for the file if
1563 # there isn't one already.
1566 # there isn't one already.
1564 ndset = msng_filenode_set.setdefault(f, {})
1567 ndset = msng_filenode_set.setdefault(f, {})
1565 # And set the filenode's changelog node to the
1568 # And set the filenode's changelog node to the
1566 # manifest's if it hasn't been set already.
1569 # manifest's if it hasn't been set already.
1567 ndset.setdefault(fnode, clnode)
1570 ndset.setdefault(fnode, clnode)
1568 else:
1571 else:
1569 # Otherwise we need a full manifest.
1572 # Otherwise we need a full manifest.
1570 m = mnfst.read(mnfstnode)
1573 m = mnfst.read(mnfstnode)
1571 # For every file in we care about.
1574 # For every file in we care about.
1572 for f in changedfiles:
1575 for f in changedfiles:
1573 fnode = m.get(f, None)
1576 fnode = m.get(f, None)
1574 # If it's in the manifest
1577 # If it's in the manifest
1575 if fnode is not None:
1578 if fnode is not None:
1576 # See comments above.
1579 # See comments above.
1577 clnode = msng_mnfst_set[mnfstnode]
1580 clnode = msng_mnfst_set[mnfstnode]
1578 ndset = msng_filenode_set.setdefault(f, {})
1581 ndset = msng_filenode_set.setdefault(f, {})
1579 ndset.setdefault(fnode, clnode)
1582 ndset.setdefault(fnode, clnode)
1580 # Remember the revision we hope to see next.
1583 # Remember the revision we hope to see next.
1581 next_rev[0] = r + 1
1584 next_rev[0] = r + 1
1582 return collect_msng_filenodes
1585 return collect_msng_filenodes
1583
1586
1584 # We have a list of filenodes we think we need for a file, lets remove
1587 # We have a list of filenodes we think we need for a file, lets remove
1585 # all those we now the recipient must have.
1588 # all those we now the recipient must have.
1586 def prune_filenodes(f, filerevlog):
1589 def prune_filenodes(f, filerevlog):
1587 msngset = msng_filenode_set[f]
1590 msngset = msng_filenode_set[f]
1588 hasset = {}
1591 hasset = {}
1589 # If a 'missing' filenode thinks it belongs to a changenode we
1592 # If a 'missing' filenode thinks it belongs to a changenode we
1590 # assume the recipient must have, then the recipient must have
1593 # assume the recipient must have, then the recipient must have
1591 # that filenode.
1594 # that filenode.
1592 for n in msngset:
1595 for n in msngset:
1593 clnode = cl.node(filerevlog.linkrev(n))
1596 clnode = cl.node(filerevlog.linkrev(n))
1594 if clnode in has_cl_set:
1597 if clnode in has_cl_set:
1595 hasset[n] = 1
1598 hasset[n] = 1
1596 prune_parents(filerevlog, hasset, msngset)
1599 prune_parents(filerevlog, hasset, msngset)
1597
1600
1598 # A function generator function that sets up the a context for the
1601 # A function generator function that sets up the a context for the
1599 # inner function.
1602 # inner function.
1600 def lookup_filenode_link_func(fname):
1603 def lookup_filenode_link_func(fname):
1601 msngset = msng_filenode_set[fname]
1604 msngset = msng_filenode_set[fname]
1602 # Lookup the changenode the filenode belongs to.
1605 # Lookup the changenode the filenode belongs to.
1603 def lookup_filenode_link(fnode):
1606 def lookup_filenode_link(fnode):
1604 return msngset[fnode]
1607 return msngset[fnode]
1605 return lookup_filenode_link
1608 return lookup_filenode_link
1606
1609
1607 # Now that we have all theses utility functions to help out and
1610 # Now that we have all theses utility functions to help out and
1608 # logically divide up the task, generate the group.
1611 # logically divide up the task, generate the group.
1609 def gengroup():
1612 def gengroup():
1610 # The set of changed files starts empty.
1613 # The set of changed files starts empty.
1611 changedfiles = {}
1614 changedfiles = {}
1612 # Create a changenode group generator that will call our functions
1615 # Create a changenode group generator that will call our functions
1613 # back to lookup the owning changenode and collect information.
1616 # back to lookup the owning changenode and collect information.
1614 group = cl.group(msng_cl_lst, identity,
1617 group = cl.group(msng_cl_lst, identity,
1615 manifest_and_file_collector(changedfiles))
1618 manifest_and_file_collector(changedfiles))
1616 for chnk in group:
1619 for chnk in group:
1617 yield chnk
1620 yield chnk
1618
1621
1619 # The list of manifests has been collected by the generator
1622 # The list of manifests has been collected by the generator
1620 # calling our functions back.
1623 # calling our functions back.
1621 prune_manifests()
1624 prune_manifests()
1622 msng_mnfst_lst = msng_mnfst_set.keys()
1625 msng_mnfst_lst = msng_mnfst_set.keys()
1623 # Sort the manifestnodes by revision number.
1626 # Sort the manifestnodes by revision number.
1624 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1627 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1625 # Create a generator for the manifestnodes that calls our lookup
1628 # Create a generator for the manifestnodes that calls our lookup
1626 # and data collection functions back.
1629 # and data collection functions back.
1627 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1630 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1628 filenode_collector(changedfiles))
1631 filenode_collector(changedfiles))
1629 for chnk in group:
1632 for chnk in group:
1630 yield chnk
1633 yield chnk
1631
1634
1632 # These are no longer needed, dereference and toss the memory for
1635 # These are no longer needed, dereference and toss the memory for
1633 # them.
1636 # them.
1634 msng_mnfst_lst = None
1637 msng_mnfst_lst = None
1635 msng_mnfst_set.clear()
1638 msng_mnfst_set.clear()
1636
1639
1637 changedfiles = changedfiles.keys()
1640 changedfiles = changedfiles.keys()
1638 changedfiles.sort()
1641 changedfiles.sort()
1639 # Go through all our files in order sorted by name.
1642 # Go through all our files in order sorted by name.
1640 for fname in changedfiles:
1643 for fname in changedfiles:
1641 filerevlog = self.file(fname)
1644 filerevlog = self.file(fname)
1642 # Toss out the filenodes that the recipient isn't really
1645 # Toss out the filenodes that the recipient isn't really
1643 # missing.
1646 # missing.
1644 if msng_filenode_set.has_key(fname):
1647 if msng_filenode_set.has_key(fname):
1645 prune_filenodes(fname, filerevlog)
1648 prune_filenodes(fname, filerevlog)
1646 msng_filenode_lst = msng_filenode_set[fname].keys()
1649 msng_filenode_lst = msng_filenode_set[fname].keys()
1647 else:
1650 else:
1648 msng_filenode_lst = []
1651 msng_filenode_lst = []
1649 # If any filenodes are left, generate the group for them,
1652 # If any filenodes are left, generate the group for them,
1650 # otherwise don't bother.
1653 # otherwise don't bother.
1651 if len(msng_filenode_lst) > 0:
1654 if len(msng_filenode_lst) > 0:
1652 yield changegroup.genchunk(fname)
1655 yield changegroup.genchunk(fname)
1653 # Sort the filenodes by their revision #
1656 # Sort the filenodes by their revision #
1654 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1657 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1655 # Create a group generator and only pass in a changenode
1658 # Create a group generator and only pass in a changenode
1656 # lookup function as we need to collect no information
1659 # lookup function as we need to collect no information
1657 # from filenodes.
1660 # from filenodes.
1658 group = filerevlog.group(msng_filenode_lst,
1661 group = filerevlog.group(msng_filenode_lst,
1659 lookup_filenode_link_func(fname))
1662 lookup_filenode_link_func(fname))
1660 for chnk in group:
1663 for chnk in group:
1661 yield chnk
1664 yield chnk
1662 if msng_filenode_set.has_key(fname):
1665 if msng_filenode_set.has_key(fname):
1663 # Don't need this anymore, toss it to free memory.
1666 # Don't need this anymore, toss it to free memory.
1664 del msng_filenode_set[fname]
1667 del msng_filenode_set[fname]
1665 # Signal that no more groups are left.
1668 # Signal that no more groups are left.
1666 yield changegroup.closechunk()
1669 yield changegroup.closechunk()
1667
1670
1668 if msng_cl_lst:
1671 if msng_cl_lst:
1669 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1672 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1670
1673
1671 return util.chunkbuffer(gengroup())
1674 return util.chunkbuffer(gengroup())
1672
1675
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All changesets descending from basenodes - exactly what the
        # recipient is missing.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Set of changelog revision numbers being sent; used via linkrev to
        # decide which manifest/file revisions belong in the group.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # No-op lookup: changelog nodes link to themselves.
            return x

        def gennodelst(revlog):
            # Yield the nodes of revlog whose linked changeset is in revset.
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # Build a callback that records, into changedfileset, every file
            # touched by each changeset as its chunk is emitted.
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                # c[3] is the changeset's list of changed files.
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # Build a lookup mapping a node of revlog to the changelog node
            # that introduced it.
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so we can test for emptiness before emitting
                # the filename chunk.
                nodeiter = list(nodeiter)
                if nodeiter:
                    # NOTE: 'changegroup' here is the imported module, not
                    # this method.
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1739
1742
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # Callbacks handed to revlog.addgroup; both close over 'cl', which
        # is bound later to the appendfile changelog.
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Map a changelog node to its local revision number.
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: last changelog revision before/after the add; the new
            # changesets are revisions cor+1 .. cnr.
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # Each file group is preceded by a chunk holding the file
                # name; an empty chunk terminates the stream.
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # Flush the appendfile changelog out to its real files.
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # This hook runs before tr.close() and may abort the whole
            # transaction by raising.
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # One 'incoming' hook per new changeset.
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1846
1849
1847
1850
1848 def stream_in(self, remote):
1851 def stream_in(self, remote):
1849 fp = remote.stream_out()
1852 fp = remote.stream_out()
1850 l = fp.readline()
1853 l = fp.readline()
1851 try:
1854 try:
1852 resp = int(l)
1855 resp = int(l)
1853 except ValueError:
1856 except ValueError:
1854 raise util.UnexpectedOutput(
1857 raise util.UnexpectedOutput(
1855 _('Unexpected response from remote server:'), l)
1858 _('Unexpected response from remote server:'), l)
1856 if resp == 1:
1859 if resp == 1:
1857 raise util.Abort(_('operation forbidden by server'))
1860 raise util.Abort(_('operation forbidden by server'))
1858 elif resp == 2:
1861 elif resp == 2:
1859 raise util.Abort(_('locking the remote repository failed'))
1862 raise util.Abort(_('locking the remote repository failed'))
1860 elif resp != 0:
1863 elif resp != 0:
1861 raise util.Abort(_('the server sent an unknown error code'))
1864 raise util.Abort(_('the server sent an unknown error code'))
1862 self.ui.status(_('streaming all changes\n'))
1865 self.ui.status(_('streaming all changes\n'))
1863 l = fp.readline()
1866 l = fp.readline()
1864 try:
1867 try:
1865 total_files, total_bytes = map(int, l.split(' ', 1))
1868 total_files, total_bytes = map(int, l.split(' ', 1))
1866 except ValueError, TypeError:
1869 except ValueError, TypeError:
1867 raise util.UnexpectedOutput(
1870 raise util.UnexpectedOutput(
1868 _('Unexpected response from remote server:'), l)
1871 _('Unexpected response from remote server:'), l)
1869 self.ui.status(_('%d files to transfer, %s of data\n') %
1872 self.ui.status(_('%d files to transfer, %s of data\n') %
1870 (total_files, util.bytecount(total_bytes)))
1873 (total_files, util.bytecount(total_bytes)))
1871 start = time.time()
1874 start = time.time()
1872 for i in xrange(total_files):
1875 for i in xrange(total_files):
1873 # XXX doesn't support '\n' or '\r' in filenames
1876 # XXX doesn't support '\n' or '\r' in filenames
1874 l = fp.readline()
1877 l = fp.readline()
1875 try:
1878 try:
1876 name, size = l.split('\0', 1)
1879 name, size = l.split('\0', 1)
1877 size = int(size)
1880 size = int(size)
1878 except ValueError, TypeError:
1881 except ValueError, TypeError:
1879 raise util.UnexpectedOutput(
1882 raise util.UnexpectedOutput(
1880 _('Unexpected response from remote server:'), l)
1883 _('Unexpected response from remote server:'), l)
1881 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1884 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1882 ofp = self.sopener(name, 'w')
1885 ofp = self.sopener(name, 'w')
1883 for chunk in util.filechunkiter(fp, limit=size):
1886 for chunk in util.filechunkiter(fp, limit=size):
1884 ofp.write(chunk)
1887 ofp.write(chunk)
1885 ofp.close()
1888 ofp.close()
1886 elapsed = time.time() - start
1889 elapsed = time.time() - start
1887 if elapsed <= 0:
1890 if elapsed <= 0:
1888 elapsed = 0.001
1891 elapsed = 0.001
1889 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1892 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1890 (util.bytecount(total_bytes), elapsed,
1893 (util.bytecount(total_bytes), elapsed,
1891 util.bytecount(total_bytes / elapsed)))
1894 util.bytecount(total_bytes / elapsed)))
1892 self.reload()
1895 self.reload()
1893 return len(self.heads()) + 1
1896 return len(self.heads()) + 1
1894
1897
1895 def clone(self, remote, heads=[], stream=False):
1898 def clone(self, remote, heads=[], stream=False):
1896 '''clone remote repository.
1899 '''clone remote repository.
1897
1900
1898 keyword arguments:
1901 keyword arguments:
1899 heads: list of revs to clone (forces use of pull)
1902 heads: list of revs to clone (forces use of pull)
1900 stream: use streaming clone if possible'''
1903 stream: use streaming clone if possible'''
1901
1904
1902 # now, all clients that can request uncompressed clones can
1905 # now, all clients that can request uncompressed clones can
1903 # read repo formats supported by all servers that can serve
1906 # read repo formats supported by all servers that can serve
1904 # them.
1907 # them.
1905
1908
1906 # if revlog format changes, client will have to check version
1909 # if revlog format changes, client will have to check version
1907 # and format flags on "stream" capability, and use
1910 # and format flags on "stream" capability, and use
1908 # uncompressed only if compatible.
1911 # uncompressed only if compatible.
1909
1912
1910 if stream and not heads and remote.capable('stream'):
1913 if stream and not heads and remote.capable('stream'):
1911 return self.stream_in(remote)
1914 return self.stream_in(remote)
1912 return self.pull(remote, heads)
1915 return self.pull(remote, heads)
1913
1916
1914 # used to avoid circular references so destructors work
1917 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the renames queued in *files*.

    Each entry of *files* is a (src, dest) pair; the pairs are snapshotted
    as tuples up front so the returned closure holds no reference to the
    caller's list (used to avoid circular references so destructors work).
    """
    pending = [tuple(pair) for pair in files]

    def run_renames():
        # Carry out every queued rename via util.rename.
        for source, target in pending:
            util.rename(source, target)

    return run_renames
1921
1924
def instance(ui, path, create):
    """Instantiate a localrepository for *path*, stripping any 'file:' scheme."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1924
1927
def islocal(path):
    """Report whether *path* names a local repository (always true here)."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now