##// END OF EJS Templates
pass the extra dict in rawcommit
Edouard Gomez -
r3947:79cf0977 default
parent child Browse files
Show More
@@ -1,1863 +1,1863 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, appendfile, changegroup
10 import repo, appendfile, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("There is no Mercurial repository"
29 raise repo.RepoError(_("There is no Mercurial repository"
30 " here (.hg not found)"))
30 " here (.hg not found)"))
31 path = p
31 path = p
32
32
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34 self.root = os.path.realpath(path)
34 self.root = os.path.realpath(path)
35 self.origroot = path
35 self.origroot = path
36 self.opener = util.opener(self.path)
36 self.opener = util.opener(self.path)
37 self.wopener = util.opener(self.root)
37 self.wopener = util.opener(self.root)
38
38
39 if not os.path.isdir(self.path):
39 if not os.path.isdir(self.path):
40 if create:
40 if create:
41 if not os.path.exists(path):
41 if not os.path.exists(path):
42 os.mkdir(path)
42 os.mkdir(path)
43 os.mkdir(self.path)
43 os.mkdir(self.path)
44 os.mkdir(os.path.join(self.path, "store"))
44 os.mkdir(os.path.join(self.path, "store"))
45 requirements = ("revlogv1", "store")
45 requirements = ("revlogv1", "store")
46 reqfile = self.opener("requires", "w")
46 reqfile = self.opener("requires", "w")
47 for r in requirements:
47 for r in requirements:
48 reqfile.write("%s\n" % r)
48 reqfile.write("%s\n" % r)
49 reqfile.close()
49 reqfile.close()
50 # create an invalid changelog
50 # create an invalid changelog
51 self.opener("00changelog.i", "a").write(
51 self.opener("00changelog.i", "a").write(
52 '\0\0\0\2' # represents revlogv2
52 '\0\0\0\2' # represents revlogv2
53 ' dummy changelog to prevent using the old repo layout'
53 ' dummy changelog to prevent using the old repo layout'
54 )
54 )
55 else:
55 else:
56 raise repo.RepoError(_("repository %s not found") % path)
56 raise repo.RepoError(_("repository %s not found") % path)
57 elif create:
57 elif create:
58 raise repo.RepoError(_("repository %s already exists") % path)
58 raise repo.RepoError(_("repository %s already exists") % path)
59 else:
59 else:
60 # find requirements
60 # find requirements
61 try:
61 try:
62 requirements = self.opener("requires").read().splitlines()
62 requirements = self.opener("requires").read().splitlines()
63 except IOError, inst:
63 except IOError, inst:
64 if inst.errno != errno.ENOENT:
64 if inst.errno != errno.ENOENT:
65 raise
65 raise
66 requirements = []
66 requirements = []
67 # check them
67 # check them
68 for r in requirements:
68 for r in requirements:
69 if r not in self.supported:
69 if r not in self.supported:
70 raise repo.RepoError(_("requirement '%s' not supported") % r)
70 raise repo.RepoError(_("requirement '%s' not supported") % r)
71
71
72 # setup store
72 # setup store
73 if "store" in requirements:
73 if "store" in requirements:
74 self.encodefn = util.encodefilename
74 self.encodefn = util.encodefilename
75 self.decodefn = util.decodefilename
75 self.decodefn = util.decodefilename
76 self.spath = os.path.join(self.path, "store")
76 self.spath = os.path.join(self.path, "store")
77 else:
77 else:
78 self.encodefn = lambda x: x
78 self.encodefn = lambda x: x
79 self.decodefn = lambda x: x
79 self.decodefn = lambda x: x
80 self.spath = self.path
80 self.spath = self.path
81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
82
82
83 self.ui = ui.ui(parentui=parentui)
83 self.ui = ui.ui(parentui=parentui)
84 try:
84 try:
85 self.ui.readconfig(self.join("hgrc"), self.root)
85 self.ui.readconfig(self.join("hgrc"), self.root)
86 except IOError:
86 except IOError:
87 pass
87 pass
88
88
89 v = self.ui.configrevlog()
89 v = self.ui.configrevlog()
90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
92 fl = v.get('flags', None)
92 fl = v.get('flags', None)
93 flags = 0
93 flags = 0
94 if fl != None:
94 if fl != None:
95 for x in fl.split():
95 for x in fl.split():
96 flags |= revlog.flagstr(x)
96 flags |= revlog.flagstr(x)
97 elif self.revlogv1:
97 elif self.revlogv1:
98 flags = revlog.REVLOG_DEFAULT_FLAGS
98 flags = revlog.REVLOG_DEFAULT_FLAGS
99
99
100 v = self.revlogversion | flags
100 v = self.revlogversion | flags
101 self.manifest = manifest.manifest(self.sopener, v)
101 self.manifest = manifest.manifest(self.sopener, v)
102 self.changelog = changelog.changelog(self.sopener, v)
102 self.changelog = changelog.changelog(self.sopener, v)
103
103
104 fallback = self.ui.config('ui', 'fallbackencoding')
104 fallback = self.ui.config('ui', 'fallbackencoding')
105 if fallback:
105 if fallback:
106 util._fallbackencoding = fallback
106 util._fallbackencoding = fallback
107
107
108 # the changelog might not have the inline index flag
108 # the changelog might not have the inline index flag
109 # on. If the format of the changelog is the same as found in
109 # on. If the format of the changelog is the same as found in
110 # .hgrc, apply any flags found in the .hgrc as well.
110 # .hgrc, apply any flags found in the .hgrc as well.
111 # Otherwise, just version from the changelog
111 # Otherwise, just version from the changelog
112 v = self.changelog.version
112 v = self.changelog.version
113 if v == self.revlogversion:
113 if v == self.revlogversion:
114 v |= flags
114 v |= flags
115 self.revlogversion = v
115 self.revlogversion = v
116
116
117 self.tagscache = None
117 self.tagscache = None
118 self.branchcache = None
118 self.branchcache = None
119 self.nodetagscache = None
119 self.nodetagscache = None
120 self.encodepats = None
120 self.encodepats = None
121 self.decodepats = None
121 self.decodepats = None
122 self.transhandle = None
122 self.transhandle = None
123
123
124 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
124 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
125
125
126 def url(self):
126 def url(self):
127 return 'file:' + self.root
127 return 'file:' + self.root
128
128
129 def hook(self, name, throw=False, **args):
129 def hook(self, name, throw=False, **args):
130 def callhook(hname, funcname):
130 def callhook(hname, funcname):
131 '''call python hook. hook is callable object, looked up as
131 '''call python hook. hook is callable object, looked up as
132 name in python module. if callable returns "true", hook
132 name in python module. if callable returns "true", hook
133 fails, else passes. if hook raises exception, treated as
133 fails, else passes. if hook raises exception, treated as
134 hook failure. exception propagates if throw is "true".
134 hook failure. exception propagates if throw is "true".
135
135
136 reason for "true" meaning "hook failed" is so that
136 reason for "true" meaning "hook failed" is so that
137 unmodified commands (e.g. mercurial.commands.update) can
137 unmodified commands (e.g. mercurial.commands.update) can
138 be run as hooks without wrappers to convert return values.'''
138 be run as hooks without wrappers to convert return values.'''
139
139
140 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
140 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
141 d = funcname.rfind('.')
141 d = funcname.rfind('.')
142 if d == -1:
142 if d == -1:
143 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
143 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
144 % (hname, funcname))
144 % (hname, funcname))
145 modname = funcname[:d]
145 modname = funcname[:d]
146 try:
146 try:
147 obj = __import__(modname)
147 obj = __import__(modname)
148 except ImportError:
148 except ImportError:
149 try:
149 try:
150 # extensions are loaded with hgext_ prefix
150 # extensions are loaded with hgext_ prefix
151 obj = __import__("hgext_%s" % modname)
151 obj = __import__("hgext_%s" % modname)
152 except ImportError:
152 except ImportError:
153 raise util.Abort(_('%s hook is invalid '
153 raise util.Abort(_('%s hook is invalid '
154 '(import of "%s" failed)') %
154 '(import of "%s" failed)') %
155 (hname, modname))
155 (hname, modname))
156 try:
156 try:
157 for p in funcname.split('.')[1:]:
157 for p in funcname.split('.')[1:]:
158 obj = getattr(obj, p)
158 obj = getattr(obj, p)
159 except AttributeError, err:
159 except AttributeError, err:
160 raise util.Abort(_('%s hook is invalid '
160 raise util.Abort(_('%s hook is invalid '
161 '("%s" is not defined)') %
161 '("%s" is not defined)') %
162 (hname, funcname))
162 (hname, funcname))
163 if not callable(obj):
163 if not callable(obj):
164 raise util.Abort(_('%s hook is invalid '
164 raise util.Abort(_('%s hook is invalid '
165 '("%s" is not callable)') %
165 '("%s" is not callable)') %
166 (hname, funcname))
166 (hname, funcname))
167 try:
167 try:
168 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
168 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
169 except (KeyboardInterrupt, util.SignalInterrupt):
169 except (KeyboardInterrupt, util.SignalInterrupt):
170 raise
170 raise
171 except Exception, exc:
171 except Exception, exc:
172 if isinstance(exc, util.Abort):
172 if isinstance(exc, util.Abort):
173 self.ui.warn(_('error: %s hook failed: %s\n') %
173 self.ui.warn(_('error: %s hook failed: %s\n') %
174 (hname, exc.args[0]))
174 (hname, exc.args[0]))
175 else:
175 else:
176 self.ui.warn(_('error: %s hook raised an exception: '
176 self.ui.warn(_('error: %s hook raised an exception: '
177 '%s\n') % (hname, exc))
177 '%s\n') % (hname, exc))
178 if throw:
178 if throw:
179 raise
179 raise
180 self.ui.print_exc()
180 self.ui.print_exc()
181 return True
181 return True
182 if r:
182 if r:
183 if throw:
183 if throw:
184 raise util.Abort(_('%s hook failed') % hname)
184 raise util.Abort(_('%s hook failed') % hname)
185 self.ui.warn(_('warning: %s hook failed\n') % hname)
185 self.ui.warn(_('warning: %s hook failed\n') % hname)
186 return r
186 return r
187
187
188 def runhook(name, cmd):
188 def runhook(name, cmd):
189 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
189 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
190 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
190 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
191 r = util.system(cmd, environ=env, cwd=self.root)
191 r = util.system(cmd, environ=env, cwd=self.root)
192 if r:
192 if r:
193 desc, r = util.explain_exit(r)
193 desc, r = util.explain_exit(r)
194 if throw:
194 if throw:
195 raise util.Abort(_('%s hook %s') % (name, desc))
195 raise util.Abort(_('%s hook %s') % (name, desc))
196 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
196 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
197 return r
197 return r
198
198
199 r = False
199 r = False
200 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
200 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
201 if hname.split(".", 1)[0] == name and cmd]
201 if hname.split(".", 1)[0] == name and cmd]
202 hooks.sort()
202 hooks.sort()
203 for hname, cmd in hooks:
203 for hname, cmd in hooks:
204 if cmd.startswith('python:'):
204 if cmd.startswith('python:'):
205 r = callhook(hname, cmd[7:].strip()) or r
205 r = callhook(hname, cmd[7:].strip()) or r
206 else:
206 else:
207 r = runhook(hname, cmd) or r
207 r = runhook(hname, cmd) or r
208 return r
208 return r
209
209
210 tag_disallowed = ':\r\n'
210 tag_disallowed = ':\r\n'
211
211
212 def tag(self, name, node, message, local, user, date):
212 def tag(self, name, node, message, local, user, date):
213 '''tag a revision with a symbolic name.
213 '''tag a revision with a symbolic name.
214
214
215 if local is True, the tag is stored in a per-repository file.
215 if local is True, the tag is stored in a per-repository file.
216 otherwise, it is stored in the .hgtags file, and a new
216 otherwise, it is stored in the .hgtags file, and a new
217 changeset is committed with the change.
217 changeset is committed with the change.
218
218
219 keyword arguments:
219 keyword arguments:
220
220
221 local: whether to store tag in non-version-controlled file
221 local: whether to store tag in non-version-controlled file
222 (default False)
222 (default False)
223
223
224 message: commit message to use if committing
224 message: commit message to use if committing
225
225
226 user: name of user to use if committing
226 user: name of user to use if committing
227
227
228 date: date tuple to use if committing'''
228 date: date tuple to use if committing'''
229
229
230 for c in self.tag_disallowed:
230 for c in self.tag_disallowed:
231 if c in name:
231 if c in name:
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233
233
234 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
234 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
235
235
236 if local:
236 if local:
237 # local tags are stored in the current charset
237 # local tags are stored in the current charset
238 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
238 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
239 self.hook('tag', node=hex(node), tag=name, local=local)
239 self.hook('tag', node=hex(node), tag=name, local=local)
240 return
240 return
241
241
242 for x in self.status()[:5]:
242 for x in self.status()[:5]:
243 if '.hgtags' in x:
243 if '.hgtags' in x:
244 raise util.Abort(_('working copy of .hgtags is changed '
244 raise util.Abort(_('working copy of .hgtags is changed '
245 '(please commit .hgtags manually)'))
245 '(please commit .hgtags manually)'))
246
246
247 # committed tags are stored in UTF-8
247 # committed tags are stored in UTF-8
248 line = '%s %s\n' % (hex(node), util.fromlocal(name))
248 line = '%s %s\n' % (hex(node), util.fromlocal(name))
249 self.wfile('.hgtags', 'ab').write(line)
249 self.wfile('.hgtags', 'ab').write(line)
250 if self.dirstate.state('.hgtags') == '?':
250 if self.dirstate.state('.hgtags') == '?':
251 self.add(['.hgtags'])
251 self.add(['.hgtags'])
252
252
253 self.commit(['.hgtags'], message, user, date)
253 self.commit(['.hgtags'], message, user, date)
254 self.hook('tag', node=hex(node), tag=name, local=local)
254 self.hook('tag', node=hex(node), tag=name, local=local)
255
255
256 def tags(self):
256 def tags(self):
257 '''return a mapping of tag to node'''
257 '''return a mapping of tag to node'''
258 if not self.tagscache:
258 if not self.tagscache:
259 self.tagscache = {}
259 self.tagscache = {}
260
260
261 def parsetag(line, context):
261 def parsetag(line, context):
262 if not line:
262 if not line:
263 return
263 return
264 s = l.split(" ", 1)
264 s = l.split(" ", 1)
265 if len(s) != 2:
265 if len(s) != 2:
266 self.ui.warn(_("%s: cannot parse entry\n") % context)
266 self.ui.warn(_("%s: cannot parse entry\n") % context)
267 return
267 return
268 node, key = s
268 node, key = s
269 key = util.tolocal(key.strip()) # stored in UTF-8
269 key = util.tolocal(key.strip()) # stored in UTF-8
270 try:
270 try:
271 bin_n = bin(node)
271 bin_n = bin(node)
272 except TypeError:
272 except TypeError:
273 self.ui.warn(_("%s: node '%s' is not well formed\n") %
273 self.ui.warn(_("%s: node '%s' is not well formed\n") %
274 (context, node))
274 (context, node))
275 return
275 return
276 if bin_n not in self.changelog.nodemap:
276 if bin_n not in self.changelog.nodemap:
277 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
277 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
278 (context, key))
278 (context, key))
279 return
279 return
280 self.tagscache[key] = bin_n
280 self.tagscache[key] = bin_n
281
281
282 # read the tags file from each head, ending with the tip,
282 # read the tags file from each head, ending with the tip,
283 # and add each tag found to the map, with "newer" ones
283 # and add each tag found to the map, with "newer" ones
284 # taking precedence
284 # taking precedence
285 f = None
285 f = None
286 for rev, node, fnode in self._hgtagsnodes():
286 for rev, node, fnode in self._hgtagsnodes():
287 f = (f and f.filectx(fnode) or
287 f = (f and f.filectx(fnode) or
288 self.filectx('.hgtags', fileid=fnode))
288 self.filectx('.hgtags', fileid=fnode))
289 count = 0
289 count = 0
290 for l in f.data().splitlines():
290 for l in f.data().splitlines():
291 count += 1
291 count += 1
292 parsetag(l, _("%s, line %d") % (str(f), count))
292 parsetag(l, _("%s, line %d") % (str(f), count))
293
293
294 try:
294 try:
295 f = self.opener("localtags")
295 f = self.opener("localtags")
296 count = 0
296 count = 0
297 for l in f:
297 for l in f:
298 # localtags are stored in the local character set
298 # localtags are stored in the local character set
299 # while the internal tag table is stored in UTF-8
299 # while the internal tag table is stored in UTF-8
300 l = util.fromlocal(l)
300 l = util.fromlocal(l)
301 count += 1
301 count += 1
302 parsetag(l, _("localtags, line %d") % count)
302 parsetag(l, _("localtags, line %d") % count)
303 except IOError:
303 except IOError:
304 pass
304 pass
305
305
306 self.tagscache['tip'] = self.changelog.tip()
306 self.tagscache['tip'] = self.changelog.tip()
307
307
308 return self.tagscache
308 return self.tagscache
309
309
310 def _hgtagsnodes(self):
310 def _hgtagsnodes(self):
311 heads = self.heads()
311 heads = self.heads()
312 heads.reverse()
312 heads.reverse()
313 last = {}
313 last = {}
314 ret = []
314 ret = []
315 for node in heads:
315 for node in heads:
316 c = self.changectx(node)
316 c = self.changectx(node)
317 rev = c.rev()
317 rev = c.rev()
318 try:
318 try:
319 fnode = c.filenode('.hgtags')
319 fnode = c.filenode('.hgtags')
320 except revlog.LookupError:
320 except revlog.LookupError:
321 continue
321 continue
322 ret.append((rev, node, fnode))
322 ret.append((rev, node, fnode))
323 if fnode in last:
323 if fnode in last:
324 ret[last[fnode]] = None
324 ret[last[fnode]] = None
325 last[fnode] = len(ret) - 1
325 last[fnode] = len(ret) - 1
326 return [item for item in ret if item]
326 return [item for item in ret if item]
327
327
328 def tagslist(self):
328 def tagslist(self):
329 '''return a list of tags ordered by revision'''
329 '''return a list of tags ordered by revision'''
330 l = []
330 l = []
331 for t, n in self.tags().items():
331 for t, n in self.tags().items():
332 try:
332 try:
333 r = self.changelog.rev(n)
333 r = self.changelog.rev(n)
334 except:
334 except:
335 r = -2 # sort to the beginning of the list if unknown
335 r = -2 # sort to the beginning of the list if unknown
336 l.append((r, t, n))
336 l.append((r, t, n))
337 l.sort()
337 l.sort()
338 return [(t, n) for r, t, n in l]
338 return [(t, n) for r, t, n in l]
339
339
340 def nodetags(self, node):
340 def nodetags(self, node):
341 '''return the tags associated with a node'''
341 '''return the tags associated with a node'''
342 if not self.nodetagscache:
342 if not self.nodetagscache:
343 self.nodetagscache = {}
343 self.nodetagscache = {}
344 for t, n in self.tags().items():
344 for t, n in self.tags().items():
345 self.nodetagscache.setdefault(n, []).append(t)
345 self.nodetagscache.setdefault(n, []).append(t)
346 return self.nodetagscache.get(node, [])
346 return self.nodetagscache.get(node, [])
347
347
348 def _branchtags(self):
348 def _branchtags(self):
349 partial, last, lrev = self._readbranchcache()
349 partial, last, lrev = self._readbranchcache()
350
350
351 tiprev = self.changelog.count() - 1
351 tiprev = self.changelog.count() - 1
352 if lrev != tiprev:
352 if lrev != tiprev:
353 self._updatebranchcache(partial, lrev+1, tiprev+1)
353 self._updatebranchcache(partial, lrev+1, tiprev+1)
354 self._writebranchcache(partial, self.changelog.tip(), tiprev)
354 self._writebranchcache(partial, self.changelog.tip(), tiprev)
355
355
356 return partial
356 return partial
357
357
358 def branchtags(self):
358 def branchtags(self):
359 if self.branchcache is not None:
359 if self.branchcache is not None:
360 return self.branchcache
360 return self.branchcache
361
361
362 self.branchcache = {} # avoid recursion in changectx
362 self.branchcache = {} # avoid recursion in changectx
363 partial = self._branchtags()
363 partial = self._branchtags()
364
364
365 # the branch cache is stored on disk as UTF-8, but in the local
365 # the branch cache is stored on disk as UTF-8, but in the local
366 # charset internally
366 # charset internally
367 for k, v in partial.items():
367 for k, v in partial.items():
368 self.branchcache[util.tolocal(k)] = v
368 self.branchcache[util.tolocal(k)] = v
369 return self.branchcache
369 return self.branchcache
370
370
371 def _readbranchcache(self):
371 def _readbranchcache(self):
372 partial = {}
372 partial = {}
373 try:
373 try:
374 f = self.opener("branches.cache")
374 f = self.opener("branches.cache")
375 lines = f.read().split('\n')
375 lines = f.read().split('\n')
376 f.close()
376 f.close()
377 last, lrev = lines.pop(0).rstrip().split(" ", 1)
377 last, lrev = lines.pop(0).rstrip().split(" ", 1)
378 last, lrev = bin(last), int(lrev)
378 last, lrev = bin(last), int(lrev)
379 if not (lrev < self.changelog.count() and
379 if not (lrev < self.changelog.count() and
380 self.changelog.node(lrev) == last): # sanity check
380 self.changelog.node(lrev) == last): # sanity check
381 # invalidate the cache
381 # invalidate the cache
382 raise ValueError('Invalid branch cache: unknown tip')
382 raise ValueError('Invalid branch cache: unknown tip')
383 for l in lines:
383 for l in lines:
384 if not l: continue
384 if not l: continue
385 node, label = l.rstrip().split(" ", 1)
385 node, label = l.rstrip().split(" ", 1)
386 partial[label] = bin(node)
386 partial[label] = bin(node)
387 except (KeyboardInterrupt, util.SignalInterrupt):
387 except (KeyboardInterrupt, util.SignalInterrupt):
388 raise
388 raise
389 except Exception, inst:
389 except Exception, inst:
390 if self.ui.debugflag:
390 if self.ui.debugflag:
391 self.ui.warn(str(inst), '\n')
391 self.ui.warn(str(inst), '\n')
392 partial, last, lrev = {}, nullid, nullrev
392 partial, last, lrev = {}, nullid, nullrev
393 return partial, last, lrev
393 return partial, last, lrev
394
394
395 def _writebranchcache(self, branches, tip, tiprev):
395 def _writebranchcache(self, branches, tip, tiprev):
396 try:
396 try:
397 f = self.opener("branches.cache", "w")
397 f = self.opener("branches.cache", "w")
398 f.write("%s %s\n" % (hex(tip), tiprev))
398 f.write("%s %s\n" % (hex(tip), tiprev))
399 for label, node in branches.iteritems():
399 for label, node in branches.iteritems():
400 f.write("%s %s\n" % (hex(node), label))
400 f.write("%s %s\n" % (hex(node), label))
401 except IOError:
401 except IOError:
402 pass
402 pass
403
403
404 def _updatebranchcache(self, partial, start, end):
404 def _updatebranchcache(self, partial, start, end):
405 for r in xrange(start, end):
405 for r in xrange(start, end):
406 c = self.changectx(r)
406 c = self.changectx(r)
407 b = c.branch()
407 b = c.branch()
408 if b:
408 if b:
409 partial[b] = c.node()
409 partial[b] = c.node()
410
410
411 def lookup(self, key):
411 def lookup(self, key):
412 if key == '.':
412 if key == '.':
413 key = self.dirstate.parents()[0]
413 key = self.dirstate.parents()[0]
414 if key == nullid:
414 if key == nullid:
415 raise repo.RepoError(_("no revision checked out"))
415 raise repo.RepoError(_("no revision checked out"))
416 elif key == 'null':
416 elif key == 'null':
417 return nullid
417 return nullid
418 n = self.changelog._match(key)
418 n = self.changelog._match(key)
419 if n:
419 if n:
420 return n
420 return n
421 if key in self.tags():
421 if key in self.tags():
422 return self.tags()[key]
422 return self.tags()[key]
423 if key in self.branchtags():
423 if key in self.branchtags():
424 return self.branchtags()[key]
424 return self.branchtags()[key]
425 n = self.changelog._partialmatch(key)
425 n = self.changelog._partialmatch(key)
426 if n:
426 if n:
427 return n
427 return n
428 raise repo.RepoError(_("unknown revision '%s'") % key)
428 raise repo.RepoError(_("unknown revision '%s'") % key)
429
429
430 def dev(self):
430 def dev(self):
431 return os.lstat(self.path).st_dev
431 return os.lstat(self.path).st_dev
432
432
433 def local(self):
433 def local(self):
434 return True
434 return True
435
435
436 def join(self, f):
436 def join(self, f):
437 return os.path.join(self.path, f)
437 return os.path.join(self.path, f)
438
438
439 def sjoin(self, f):
439 def sjoin(self, f):
440 f = self.encodefn(f)
440 f = self.encodefn(f)
441 return os.path.join(self.spath, f)
441 return os.path.join(self.spath, f)
442
442
443 def wjoin(self, f):
443 def wjoin(self, f):
444 return os.path.join(self.root, f)
444 return os.path.join(self.root, f)
445
445
446 def file(self, f):
446 def file(self, f):
447 if f[0] == '/':
447 if f[0] == '/':
448 f = f[1:]
448 f = f[1:]
449 return filelog.filelog(self.sopener, f, self.revlogversion)
449 return filelog.filelog(self.sopener, f, self.revlogversion)
450
450
451 def changectx(self, changeid=None):
451 def changectx(self, changeid=None):
452 return context.changectx(self, changeid)
452 return context.changectx(self, changeid)
453
453
454 def workingctx(self):
454 def workingctx(self):
455 return context.workingctx(self)
455 return context.workingctx(self)
456
456
457 def parents(self, changeid=None):
457 def parents(self, changeid=None):
458 '''
458 '''
459 get list of changectxs for parents of changeid or working directory
459 get list of changectxs for parents of changeid or working directory
460 '''
460 '''
461 if changeid is None:
461 if changeid is None:
462 pl = self.dirstate.parents()
462 pl = self.dirstate.parents()
463 else:
463 else:
464 n = self.changelog.lookup(changeid)
464 n = self.changelog.lookup(changeid)
465 pl = self.changelog.parents(n)
465 pl = self.changelog.parents(n)
466 if pl[1] == nullid:
466 if pl[1] == nullid:
467 return [self.changectx(pl[0])]
467 return [self.changectx(pl[0])]
468 return [self.changectx(pl[0]), self.changectx(pl[1])]
468 return [self.changectx(pl[0]), self.changectx(pl[1])]
469
469
470 def filectx(self, path, changeid=None, fileid=None):
470 def filectx(self, path, changeid=None, fileid=None):
471 """changeid can be a changeset revision, node, or tag.
471 """changeid can be a changeset revision, node, or tag.
472 fileid can be a file revision or node."""
472 fileid can be a file revision or node."""
473 return context.filectx(self, path, changeid, fileid)
473 return context.filectx(self, path, changeid, fileid)
474
474
475 def getcwd(self):
475 def getcwd(self):
476 return self.dirstate.getcwd()
476 return self.dirstate.getcwd()
477
477
478 def wfile(self, f, mode='r'):
478 def wfile(self, f, mode='r'):
479 return self.wopener(f, mode)
479 return self.wopener(f, mode)
480
480
481 def wread(self, filename):
481 def wread(self, filename):
482 if self.encodepats == None:
482 if self.encodepats == None:
483 l = []
483 l = []
484 for pat, cmd in self.ui.configitems("encode"):
484 for pat, cmd in self.ui.configitems("encode"):
485 mf = util.matcher(self.root, "", [pat], [], [])[1]
485 mf = util.matcher(self.root, "", [pat], [], [])[1]
486 l.append((mf, cmd))
486 l.append((mf, cmd))
487 self.encodepats = l
487 self.encodepats = l
488
488
489 data = self.wopener(filename, 'r').read()
489 data = self.wopener(filename, 'r').read()
490
490
491 for mf, cmd in self.encodepats:
491 for mf, cmd in self.encodepats:
492 if mf(filename):
492 if mf(filename):
493 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
493 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
494 data = util.filter(data, cmd)
494 data = util.filter(data, cmd)
495 break
495 break
496
496
497 return data
497 return data
498
498
499 def wwrite(self, filename, data, fd=None):
499 def wwrite(self, filename, data, fd=None):
500 if self.decodepats == None:
500 if self.decodepats == None:
501 l = []
501 l = []
502 for pat, cmd in self.ui.configitems("decode"):
502 for pat, cmd in self.ui.configitems("decode"):
503 mf = util.matcher(self.root, "", [pat], [], [])[1]
503 mf = util.matcher(self.root, "", [pat], [], [])[1]
504 l.append((mf, cmd))
504 l.append((mf, cmd))
505 self.decodepats = l
505 self.decodepats = l
506
506
507 for mf, cmd in self.decodepats:
507 for mf, cmd in self.decodepats:
508 if mf(filename):
508 if mf(filename):
509 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
509 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
510 data = util.filter(data, cmd)
510 data = util.filter(data, cmd)
511 break
511 break
512
512
513 if fd:
513 if fd:
514 return fd.write(data)
514 return fd.write(data)
515 return self.wopener(filename, 'w').write(data)
515 return self.wopener(filename, 'w').write(data)
516
516
517 def transaction(self):
517 def transaction(self):
518 tr = self.transhandle
518 tr = self.transhandle
519 if tr != None and tr.running():
519 if tr != None and tr.running():
520 return tr.nest()
520 return tr.nest()
521
521
522 # save dirstate for rollback
522 # save dirstate for rollback
523 try:
523 try:
524 ds = self.opener("dirstate").read()
524 ds = self.opener("dirstate").read()
525 except IOError:
525 except IOError:
526 ds = ""
526 ds = ""
527 self.opener("journal.dirstate", "w").write(ds)
527 self.opener("journal.dirstate", "w").write(ds)
528
528
529 renames = [(self.sjoin("journal"), self.sjoin("undo")),
529 renames = [(self.sjoin("journal"), self.sjoin("undo")),
530 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
530 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
531 tr = transaction.transaction(self.ui.warn, self.sopener,
531 tr = transaction.transaction(self.ui.warn, self.sopener,
532 self.sjoin("journal"),
532 self.sjoin("journal"),
533 aftertrans(renames))
533 aftertrans(renames))
534 self.transhandle = tr
534 self.transhandle = tr
535 return tr
535 return tr
536
536
537 def recover(self):
537 def recover(self):
538 l = self.lock()
538 l = self.lock()
539 if os.path.exists(self.sjoin("journal")):
539 if os.path.exists(self.sjoin("journal")):
540 self.ui.status(_("rolling back interrupted transaction\n"))
540 self.ui.status(_("rolling back interrupted transaction\n"))
541 transaction.rollback(self.sopener, self.sjoin("journal"))
541 transaction.rollback(self.sopener, self.sjoin("journal"))
542 self.reload()
542 self.reload()
543 return True
543 return True
544 else:
544 else:
545 self.ui.warn(_("no interrupted transaction available\n"))
545 self.ui.warn(_("no interrupted transaction available\n"))
546 return False
546 return False
547
547
548 def rollback(self, wlock=None):
548 def rollback(self, wlock=None):
549 if not wlock:
549 if not wlock:
550 wlock = self.wlock()
550 wlock = self.wlock()
551 l = self.lock()
551 l = self.lock()
552 if os.path.exists(self.sjoin("undo")):
552 if os.path.exists(self.sjoin("undo")):
553 self.ui.status(_("rolling back last transaction\n"))
553 self.ui.status(_("rolling back last transaction\n"))
554 transaction.rollback(self.sopener, self.sjoin("undo"))
554 transaction.rollback(self.sopener, self.sjoin("undo"))
555 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
555 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
556 self.reload()
556 self.reload()
557 self.wreload()
557 self.wreload()
558 else:
558 else:
559 self.ui.warn(_("no rollback information available\n"))
559 self.ui.warn(_("no rollback information available\n"))
560
560
    def wreload(self):
        """Re-read working-directory state (the dirstate) from disk."""
        self.dirstate.read()
563
563
    def reload(self):
        """Re-read store data (changelog, manifest) from disk.

        Also drops the tag caches, which are derived from the changelog,
        so they will be rebuilt lazily on next access.
        """
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
569
569
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file `lockname` and return the lock object.

        First tries a non-blocking acquire.  If the lock is held and
        `wait` is false, the LockHeld exception propagates; otherwise a
        warning naming the current holder is printed and the acquire is
        retried with a timeout (ui config "ui.timeout", default 600s).
        `releasefn` is called when the lock is released, `acquirefn`
        once it has been obtained; `desc` is a human-readable name used
        in messages.
        """
        try:
            # timeout 0 = fail immediately if already held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
585
585
    def lock(self, wait=1):
        """Acquire the store lock; reloads store data on acquisition."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
589
589
    def wlock(self, wait=1):
        """Acquire the working-directory lock.

        Writes the dirstate on release and re-reads it on acquisition.
        """
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
594
594
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Reads fn from the working directory, determines its filelog
        parents from the two parent manifests (handling copy/rename
        metadata recorded in the dirstate), and adds a new filelog
        revision.  Returns the new (or reused) filelog node; appends fn
        to `changelist` only when a new revision is actually written.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file was copied/renamed; record the source and the
            # source revision, and pick which manifest the source
            # revision comes from depending on the merge situation
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # copies are stored with a null first parent
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
636
636
637 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
637 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
638 if p1 is None:
638 if p1 is None:
639 p1, p2 = self.dirstate.parents()
639 p1, p2 = self.dirstate.parents()
640 return self.commit(files=files, text=text, user=user, date=date,
640 return self.commit(files=files, text=text, user=user, date=date,
641 p1=p1, p2=p2, wlock=wlock)
641 p1=p1, p2=p2, wlock=wlock, extra=extra)
642
642
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node, or None if nothing
        was committed.

        If p1 is None (the normal case) the dirstate supplies the files
        to commit and the parents; otherwise this is a rawcommit and
        `files` is taken as-is.  `extra` is copied before use, so the
        caller's dict (and the mutable default) is never mutated.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # explicit file list: classify each by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                # no file list: commit everything modified/added/removed
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it is currently at p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # branch names are stored in UTF-8; reject invalid ones
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # an empty commit is only allowed when forced, merging, or
            # changing the branch name
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a missing file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the commit-message template shown in the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and leading
        # blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
782
782
783 def walk(self, node=None, files=[], match=util.always, badmatch=None):
783 def walk(self, node=None, files=[], match=util.always, badmatch=None):
784 '''
784 '''
785 walk recursively through the directory tree or a given
785 walk recursively through the directory tree or a given
786 changeset, finding all files matched by the match
786 changeset, finding all files matched by the match
787 function
787 function
788
788
789 results are yielded in a tuple (src, filename), where src
789 results are yielded in a tuple (src, filename), where src
790 is one of:
790 is one of:
791 'f' the file was found in the directory tree
791 'f' the file was found in the directory tree
792 'm' the file was only in the dirstate and not in the tree
792 'm' the file was only in the dirstate and not in the tree
793 'b' file was not found and matched badmatch
793 'b' file was not found and matched badmatch
794 '''
794 '''
795
795
796 if node:
796 if node:
797 fdict = dict.fromkeys(files)
797 fdict = dict.fromkeys(files)
798 for fn in self.manifest.read(self.changelog.read(node)[0]):
798 for fn in self.manifest.read(self.changelog.read(node)[0]):
799 for ffn in fdict:
799 for ffn in fdict:
800 # match if the file is the exact name or a directory
800 # match if the file is the exact name or a directory
801 if ffn == fn or fn.startswith("%s/" % ffn):
801 if ffn == fn or fn.startswith("%s/" % ffn):
802 del fdict[ffn]
802 del fdict[ffn]
803 break
803 break
804 if match(fn):
804 if match(fn):
805 yield 'm', fn
805 yield 'm', fn
806 for fn in fdict:
806 for fn in fdict:
807 if badmatch and badmatch(fn):
807 if badmatch and badmatch(fn):
808 if match(fn):
808 if match(fn):
809 yield 'b', fn
809 yield 'b', fn
810 else:
810 else:
811 self.ui.warn(_('%s: No such file in rev %s\n') % (
811 self.ui.warn(_('%s: No such file in rev %s\n') % (
812 util.pathto(self.getcwd(), fn), short(node)))
812 util.pathto(self.getcwd(), fn), short(node)))
813 else:
813 else:
814 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
814 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
815 yield src, fn
815 yield src, fn
816
816
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when list_ignored /
        list_clean are set.
        """

        def fcmp(fn, mf):
            # compare working-directory contents of fn against the
            # filelog revision recorded in manifest mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of `node`, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # take the wlock opportunistically so we may write back
                # clean-file updates; fall back to lockless operation
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the file as clean so the next
                                # status can skip the content compare
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # empty mf2 entry means "working dir file": compare
                    # content; otherwise compare recorded nodes/flags
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 existed only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
913
913
914 def add(self, list, wlock=None):
914 def add(self, list, wlock=None):
915 if not wlock:
915 if not wlock:
916 wlock = self.wlock()
916 wlock = self.wlock()
917 for f in list:
917 for f in list:
918 p = self.wjoin(f)
918 p = self.wjoin(f)
919 if not os.path.exists(p):
919 if not os.path.exists(p):
920 self.ui.warn(_("%s does not exist!\n") % f)
920 self.ui.warn(_("%s does not exist!\n") % f)
921 elif not os.path.isfile(p):
921 elif not os.path.isfile(p):
922 self.ui.warn(_("%s not added: only files supported currently\n")
922 self.ui.warn(_("%s not added: only files supported currently\n")
923 % f)
923 % f)
924 elif self.dirstate.state(f) in 'an':
924 elif self.dirstate.state(f) in 'an':
925 self.ui.warn(_("%s already tracked!\n") % f)
925 self.ui.warn(_("%s already tracked!\n") % f)
926 else:
926 else:
927 self.dirstate.update([f], "a")
927 self.dirstate.update([f], "a")
928
928
929 def forget(self, list, wlock=None):
929 def forget(self, list, wlock=None):
930 if not wlock:
930 if not wlock:
931 wlock = self.wlock()
931 wlock = self.wlock()
932 for f in list:
932 for f in list:
933 if self.dirstate.state(f) not in 'ai':
933 if self.dirstate.state(f) not in 'ai':
934 self.ui.warn(_("%s not added!\n") % f)
934 self.ui.warn(_("%s not added!\n") % f)
935 else:
935 else:
936 self.dirstate.forget([f])
936 self.dirstate.forget([f])
937
937
938 def remove(self, list, unlink=False, wlock=None):
938 def remove(self, list, unlink=False, wlock=None):
939 if unlink:
939 if unlink:
940 for f in list:
940 for f in list:
941 try:
941 try:
942 util.unlink(self.wjoin(f))
942 util.unlink(self.wjoin(f))
943 except OSError, inst:
943 except OSError, inst:
944 if inst.errno != errno.ENOENT:
944 if inst.errno != errno.ENOENT:
945 raise
945 raise
946 if not wlock:
946 if not wlock:
947 wlock = self.wlock()
947 wlock = self.wlock()
948 for f in list:
948 for f in list:
949 p = self.wjoin(f)
949 p = self.wjoin(f)
950 if os.path.exists(p):
950 if os.path.exists(p):
951 self.ui.warn(_("%s still exists!\n") % f)
951 self.ui.warn(_("%s still exists!\n") % f)
952 elif self.dirstate.state(f) == 'a':
952 elif self.dirstate.state(f) == 'a':
953 self.dirstate.forget([f])
953 self.dirstate.forget([f])
954 elif f not in self.dirstate:
954 elif f not in self.dirstate:
955 self.ui.warn(_("%s not tracked!\n") % f)
955 self.ui.warn(_("%s not tracked!\n") % f)
956 else:
956 else:
957 self.dirstate.update([f], "r")
957 self.dirstate.update([f], "r")
958
958
959 def undelete(self, list, wlock=None):
959 def undelete(self, list, wlock=None):
960 p = self.dirstate.parents()[0]
960 p = self.dirstate.parents()[0]
961 mn = self.changelog.read(p)[0]
961 mn = self.changelog.read(p)[0]
962 m = self.manifest.read(mn)
962 m = self.manifest.read(mn)
963 if not wlock:
963 if not wlock:
964 wlock = self.wlock()
964 wlock = self.wlock()
965 for f in list:
965 for f in list:
966 if self.dirstate.state(f) not in "r":
966 if self.dirstate.state(f) not in "r":
967 self.ui.warn("%s not removed!\n" % f)
967 self.ui.warn("%s not removed!\n" % f)
968 else:
968 else:
969 t = self.file(f).read(m[f])
969 t = self.file(f).read(m[f])
970 self.wwrite(f, t)
970 self.wwrite(f, t)
971 util.set_exec(self.wjoin(f), m.execf(f))
971 util.set_exec(self.wjoin(f), m.execf(f))
972 self.dirstate.update([f], "n")
972 self.dirstate.update([f], "n")
973
973
974 def copy(self, source, dest, wlock=None):
974 def copy(self, source, dest, wlock=None):
975 p = self.wjoin(dest)
975 p = self.wjoin(dest)
976 if not os.path.exists(p):
976 if not os.path.exists(p):
977 self.ui.warn(_("%s does not exist!\n") % dest)
977 self.ui.warn(_("%s does not exist!\n") % dest)
978 elif not os.path.isfile(p):
978 elif not os.path.isfile(p):
979 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
979 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
980 else:
980 else:
981 if not wlock:
981 if not wlock:
982 wlock = self.wlock()
982 wlock = self.wlock()
983 if self.dirstate.state(dest) == '?':
983 if self.dirstate.state(dest) == '?':
984 self.dirstate.update([dest], "a")
984 self.dirstate.update([dest], "a")
985 self.dirstate.copy(source, dest)
985 self.dirstate.copy(source, dest)
986
986
987 def heads(self, start=None):
987 def heads(self, start=None):
988 heads = self.changelog.heads(start)
988 heads = self.changelog.heads(start)
989 # sort the output in rev descending order
989 # sort the output in rev descending order
990 heads = [(-self.changelog.rev(h), h) for h in heads]
990 heads = [(-self.changelog.rev(h), h) for h in heads]
991 heads.sort()
991 heads.sort()
992 return [n for (r, n) in heads]
992 return [n for (r, n) in heads]
993
993
994 def branches(self, nodes):
994 def branches(self, nodes):
995 if not nodes:
995 if not nodes:
996 nodes = [self.changelog.tip()]
996 nodes = [self.changelog.tip()]
997 b = []
997 b = []
998 for n in nodes:
998 for n in nodes:
999 t = n
999 t = n
1000 while 1:
1000 while 1:
1001 p = self.changelog.parents(n)
1001 p = self.changelog.parents(n)
1002 if p[1] != nullid or p[0] == nullid:
1002 if p[1] != nullid or p[0] == nullid:
1003 b.append((t, n, p[0], p[1]))
1003 b.append((t, n, p[0], p[1]))
1004 break
1004 break
1005 n = p[0]
1005 n = p[0]
1006 return b
1006 return b
1007
1007
1008 def between(self, pairs):
1008 def between(self, pairs):
1009 r = []
1009 r = []
1010
1010
1011 for top, bottom in pairs:
1011 for top, bottom in pairs:
1012 n, l, i = top, [], 0
1012 n, l, i = top, [], 0
1013 f = 1
1013 f = 1
1014
1014
1015 while n != bottom:
1015 while n != bottom:
1016 p = self.changelog.parents(n)[0]
1016 p = self.changelog.parents(n)[0]
1017 if i == f:
1017 if i == f:
1018 l.append(n)
1018 l.append(n)
1019 f = f * 2
1019 f = f * 2
1020 n = p
1020 n = p
1021 i += 1
1021 i += 1
1022
1022
1023 r.append(l)
1023 r.append(l)
1024
1024
1025 return r
1025 return r
1026
1026
1027 def findincoming(self, remote, base=None, heads=None, force=False):
1027 def findincoming(self, remote, base=None, heads=None, force=False):
1028 """Return list of roots of the subsets of missing nodes from remote
1028 """Return list of roots of the subsets of missing nodes from remote
1029
1029
1030 If base dict is specified, assume that these nodes and their parents
1030 If base dict is specified, assume that these nodes and their parents
1031 exist on the remote side and that no child of a node of base exists
1031 exist on the remote side and that no child of a node of base exists
1032 in both remote and self.
1032 in both remote and self.
1033 Furthermore base will be updated to include the nodes that exists
1033 Furthermore base will be updated to include the nodes that exists
1034 in self and remote but no children exists in self and remote.
1034 in self and remote but no children exists in self and remote.
1035 If a list of heads is specified, return only nodes which are heads
1035 If a list of heads is specified, return only nodes which are heads
1036 or ancestors of these heads.
1036 or ancestors of these heads.
1037
1037
1038 All the ancestors of base are in self and in remote.
1038 All the ancestors of base are in self and in remote.
1039 All the descendants of the list returned are missing in self.
1039 All the descendants of the list returned are missing in self.
1040 (and so we know that the rest of the nodes are missing in remote, see
1040 (and so we know that the rest of the nodes are missing in remote, see
1041 outgoing)
1041 outgoing)
1042 """
1042 """
1043 m = self.changelog.nodemap
1043 m = self.changelog.nodemap
1044 search = []
1044 search = []
1045 fetch = {}
1045 fetch = {}
1046 seen = {}
1046 seen = {}
1047 seenbranch = {}
1047 seenbranch = {}
1048 if base == None:
1048 if base == None:
1049 base = {}
1049 base = {}
1050
1050
1051 if not heads:
1051 if not heads:
1052 heads = remote.heads()
1052 heads = remote.heads()
1053
1053
1054 if self.changelog.tip() == nullid:
1054 if self.changelog.tip() == nullid:
1055 base[nullid] = 1
1055 base[nullid] = 1
1056 if heads != [nullid]:
1056 if heads != [nullid]:
1057 return [nullid]
1057 return [nullid]
1058 return []
1058 return []
1059
1059
1060 # assume we're closer to the tip than the root
1060 # assume we're closer to the tip than the root
1061 # and start by examining the heads
1061 # and start by examining the heads
1062 self.ui.status(_("searching for changes\n"))
1062 self.ui.status(_("searching for changes\n"))
1063
1063
1064 unknown = []
1064 unknown = []
1065 for h in heads:
1065 for h in heads:
1066 if h not in m:
1066 if h not in m:
1067 unknown.append(h)
1067 unknown.append(h)
1068 else:
1068 else:
1069 base[h] = 1
1069 base[h] = 1
1070
1070
1071 if not unknown:
1071 if not unknown:
1072 return []
1072 return []
1073
1073
1074 req = dict.fromkeys(unknown)
1074 req = dict.fromkeys(unknown)
1075 reqcnt = 0
1075 reqcnt = 0
1076
1076
1077 # search through remote branches
1077 # search through remote branches
1078 # a 'branch' here is a linear segment of history, with four parts:
1078 # a 'branch' here is a linear segment of history, with four parts:
1079 # head, root, first parent, second parent
1079 # head, root, first parent, second parent
1080 # (a branch always has two parents (or none) by definition)
1080 # (a branch always has two parents (or none) by definition)
1081 unknown = remote.branches(unknown)
1081 unknown = remote.branches(unknown)
1082 while unknown:
1082 while unknown:
1083 r = []
1083 r = []
1084 while unknown:
1084 while unknown:
1085 n = unknown.pop(0)
1085 n = unknown.pop(0)
1086 if n[0] in seen:
1086 if n[0] in seen:
1087 continue
1087 continue
1088
1088
1089 self.ui.debug(_("examining %s:%s\n")
1089 self.ui.debug(_("examining %s:%s\n")
1090 % (short(n[0]), short(n[1])))
1090 % (short(n[0]), short(n[1])))
1091 if n[0] == nullid: # found the end of the branch
1091 if n[0] == nullid: # found the end of the branch
1092 pass
1092 pass
1093 elif n in seenbranch:
1093 elif n in seenbranch:
1094 self.ui.debug(_("branch already found\n"))
1094 self.ui.debug(_("branch already found\n"))
1095 continue
1095 continue
1096 elif n[1] and n[1] in m: # do we know the base?
1096 elif n[1] and n[1] in m: # do we know the base?
1097 self.ui.debug(_("found incomplete branch %s:%s\n")
1097 self.ui.debug(_("found incomplete branch %s:%s\n")
1098 % (short(n[0]), short(n[1])))
1098 % (short(n[0]), short(n[1])))
1099 search.append(n) # schedule branch range for scanning
1099 search.append(n) # schedule branch range for scanning
1100 seenbranch[n] = 1
1100 seenbranch[n] = 1
1101 else:
1101 else:
1102 if n[1] not in seen and n[1] not in fetch:
1102 if n[1] not in seen and n[1] not in fetch:
1103 if n[2] in m and n[3] in m:
1103 if n[2] in m and n[3] in m:
1104 self.ui.debug(_("found new changeset %s\n") %
1104 self.ui.debug(_("found new changeset %s\n") %
1105 short(n[1]))
1105 short(n[1]))
1106 fetch[n[1]] = 1 # earliest unknown
1106 fetch[n[1]] = 1 # earliest unknown
1107 for p in n[2:4]:
1107 for p in n[2:4]:
1108 if p in m:
1108 if p in m:
1109 base[p] = 1 # latest known
1109 base[p] = 1 # latest known
1110
1110
1111 for p in n[2:4]:
1111 for p in n[2:4]:
1112 if p not in req and p not in m:
1112 if p not in req and p not in m:
1113 r.append(p)
1113 r.append(p)
1114 req[p] = 1
1114 req[p] = 1
1115 seen[n[0]] = 1
1115 seen[n[0]] = 1
1116
1116
1117 if r:
1117 if r:
1118 reqcnt += 1
1118 reqcnt += 1
1119 self.ui.debug(_("request %d: %s\n") %
1119 self.ui.debug(_("request %d: %s\n") %
1120 (reqcnt, " ".join(map(short, r))))
1120 (reqcnt, " ".join(map(short, r))))
1121 for p in xrange(0, len(r), 10):
1121 for p in xrange(0, len(r), 10):
1122 for b in remote.branches(r[p:p+10]):
1122 for b in remote.branches(r[p:p+10]):
1123 self.ui.debug(_("received %s:%s\n") %
1123 self.ui.debug(_("received %s:%s\n") %
1124 (short(b[0]), short(b[1])))
1124 (short(b[0]), short(b[1])))
1125 unknown.append(b)
1125 unknown.append(b)
1126
1126
1127 # do binary search on the branches we found
1127 # do binary search on the branches we found
1128 while search:
1128 while search:
1129 n = search.pop(0)
1129 n = search.pop(0)
1130 reqcnt += 1
1130 reqcnt += 1
1131 l = remote.between([(n[0], n[1])])[0]
1131 l = remote.between([(n[0], n[1])])[0]
1132 l.append(n[1])
1132 l.append(n[1])
1133 p = n[0]
1133 p = n[0]
1134 f = 1
1134 f = 1
1135 for i in l:
1135 for i in l:
1136 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1136 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1137 if i in m:
1137 if i in m:
1138 if f <= 2:
1138 if f <= 2:
1139 self.ui.debug(_("found new branch changeset %s\n") %
1139 self.ui.debug(_("found new branch changeset %s\n") %
1140 short(p))
1140 short(p))
1141 fetch[p] = 1
1141 fetch[p] = 1
1142 base[i] = 1
1142 base[i] = 1
1143 else:
1143 else:
1144 self.ui.debug(_("narrowed branch search to %s:%s\n")
1144 self.ui.debug(_("narrowed branch search to %s:%s\n")
1145 % (short(p), short(i)))
1145 % (short(p), short(i)))
1146 search.append((p, i))
1146 search.append((p, i))
1147 break
1147 break
1148 p, f = i, f * 2
1148 p, f = i, f * 2
1149
1149
1150 # sanity check our fetch list
1150 # sanity check our fetch list
1151 for f in fetch.keys():
1151 for f in fetch.keys():
1152 if f in m:
1152 if f in m:
1153 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1153 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1154
1154
1155 if base.keys() == [nullid]:
1155 if base.keys() == [nullid]:
1156 if force:
1156 if force:
1157 self.ui.warn(_("warning: repository is unrelated\n"))
1157 self.ui.warn(_("warning: repository is unrelated\n"))
1158 else:
1158 else:
1159 raise util.Abort(_("repository is unrelated"))
1159 raise util.Abort(_("repository is unrelated"))
1160
1160
1161 self.ui.debug(_("found new changesets starting at ") +
1161 self.ui.debug(_("found new changesets starting at ") +
1162 " ".join([short(f) for f in fetch]) + "\n")
1162 " ".join([short(f) for f in fetch]) + "\n")
1163
1163
1164 self.ui.debug(_("%d total queries\n") % reqcnt)
1164 self.ui.debug(_("%d total queries\n") % reqcnt)
1165
1165
1166 return fetch.keys()
1166 return fetch.keys()
1167
1167
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base is None:  # idiom fix: compare to the None singleton with 'is'
        base = {}
        # populate 'base' with nodes known to exist remotely
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    # start with every node in the local changelog...
    remain = dict.fromkeys(self.changelog.nodemap)

    # ...and prune everything remote has from the tree.
    # 'remove' is a worklist seeded with the common nodes; copy it
    # explicitly so we never iterate the dict's own key view while
    # extending it.
    del remain[nullid]
    remove = list(base.keys())
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned: these are the
    # roots of the subsets missing remotely
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1215
1215
def pull(self, remote, heads=None, force=False, lock=None):
    """Fetch missing changesets from the remote repository.

    Takes the repository lock unless the caller already passed one in,
    and releases it only if it was acquired here.  Returns the result
    of addchangegroup, or 0 when nothing new was found.
    """
    ownlock = lock is None
    if ownlock:
        lock = self.lock()

    try:
        fetch = self.findincoming(remote, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            # partial pull needs server-side changegroupsubset support
            if 'changegroupsubset' not in remote.capabilities:
                raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        if ownlock:
            lock.release()
1241
1241
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to the remote, choosing the transport.

    Two strategies exist:

    * addchangegroup - assumes the local user can lock the remote
      repo (local filesystem, old ssh servers).
    * unbundle - assumes the local user cannot lock the remote repo
      (new ssh servers, http servers).
    """
    if remote.capable('unbundle'):
        result = self.push_unbundle(remote, force, revs)
    else:
        result = self.push_addchangegroup(remote, force, revs)
    return result
1254
1254
def prepush(self, remote, force, revs):
    """Work out what must be pushed and build the changegroup.

    Returns a (changegroup, remote_heads) pair, or (None, 1) when
    there is nothing to push or the push would create new remote
    heads without --force.
    """
    base = {}
    remote_heads = remote.heads()
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is None:
        bases, heads = update, self.changelog.heads()
    else:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1

    if not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head
        makes_new_head = 0

        if remote_heads == [nullid]:
            # empty remote repo: anything goes
            makes_new_head = 0
        elif not revs and len(heads) > len(remote_heads):
            makes_new_head = 1
        else:
            prospective = list(heads)
            for rh in remote_heads:
                if rh not in self.changelog.nodemap:
                    # remote head unknown locally - it stays a head
                    prospective.append(rh)
                else:
                    descendants = self.changelog.heads(rh, heads)
                    if not [h for h in heads if h in descendants]:
                        # no outgoing head descends from it, so it
                        # remains a head after the push
                        prospective.append(rh)
            if len(prospective) > len(remote_heads):
                makes_new_head = 1

        if makes_new_head:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1310
1310
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote repo and adding the changegroup there.

    Returns addchangegroup's result, or prepush's error/status code
    when there is nothing to send.
    """
    lock = remote.lock()
    # release the remote lock even if prepush/addchangegroup raise;
    # the original relied on garbage collection to drop it
    try:
        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]
    finally:
        lock.release()
1319
1319
def push_unbundle(self, remote, force, revs):
    """Push via the remote's unbundle command.

    local repo finds heads on server, finds out what revs it must
    push.  once revs transferred, if server finds it has different
    heads (someone else won commit/push race), server aborts.
    """
    ret = self.prepush(remote, force, revs)
    if ret[0] is None:
        # nothing to push: propagate prepush's status code
        return ret[1]
    cg, remote_heads = ret
    if force:
        # tell the server to skip its race-detection head check
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1332
1332
def changegroupinfo(self, nodes):
    """Report how many changesets are in the group; list them in debug mode."""
    self.ui.note(_("%d changesets found\n") % len(nodes))
    if not self.ui.debugflag:
        return
    self.ui.debug(_("List of changesets:\n"))
    for n in nodes:
        self.ui.debug("%s\n" % hex(n))
1339
1339
1340 def changegroupsubset(self, bases, heads, source):
1340 def changegroupsubset(self, bases, heads, source):
1341 """This function generates a changegroup consisting of all the nodes
1341 """This function generates a changegroup consisting of all the nodes
1342 that are descendents of any of the bases, and ancestors of any of
1342 that are descendents of any of the bases, and ancestors of any of
1343 the heads.
1343 the heads.
1344
1344
1345 It is fairly complex as determining which filenodes and which
1345 It is fairly complex as determining which filenodes and which
1346 manifest nodes need to be included for the changeset to be complete
1346 manifest nodes need to be included for the changeset to be complete
1347 is non-trivial.
1347 is non-trivial.
1348
1348
1349 Another wrinkle is doing the reverse, figuring out which changeset in
1349 Another wrinkle is doing the reverse, figuring out which changeset in
1350 the changegroup a particular filenode or manifestnode belongs to."""
1350 the changegroup a particular filenode or manifestnode belongs to."""
1351
1351
1352 self.hook('preoutgoing', throw=True, source=source)
1352 self.hook('preoutgoing', throw=True, source=source)
1353
1353
1354 # Set up some initial variables
1354 # Set up some initial variables
1355 # Make it easy to refer to self.changelog
1355 # Make it easy to refer to self.changelog
1356 cl = self.changelog
1356 cl = self.changelog
1357 # msng is short for missing - compute the list of changesets in this
1357 # msng is short for missing - compute the list of changesets in this
1358 # changegroup.
1358 # changegroup.
1359 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1359 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1360 self.changegroupinfo(msng_cl_lst)
1360 self.changegroupinfo(msng_cl_lst)
1361 # Some bases may turn out to be superfluous, and some heads may be
1361 # Some bases may turn out to be superfluous, and some heads may be
1362 # too. nodesbetween will return the minimal set of bases and heads
1362 # too. nodesbetween will return the minimal set of bases and heads
1363 # necessary to re-create the changegroup.
1363 # necessary to re-create the changegroup.
1364
1364
1365 # Known heads are the list of heads that it is assumed the recipient
1365 # Known heads are the list of heads that it is assumed the recipient
1366 # of this changegroup will know about.
1366 # of this changegroup will know about.
1367 knownheads = {}
1367 knownheads = {}
1368 # We assume that all parents of bases are known heads.
1368 # We assume that all parents of bases are known heads.
1369 for n in bases:
1369 for n in bases:
1370 for p in cl.parents(n):
1370 for p in cl.parents(n):
1371 if p != nullid:
1371 if p != nullid:
1372 knownheads[p] = 1
1372 knownheads[p] = 1
1373 knownheads = knownheads.keys()
1373 knownheads = knownheads.keys()
1374 if knownheads:
1374 if knownheads:
1375 # Now that we know what heads are known, we can compute which
1375 # Now that we know what heads are known, we can compute which
1376 # changesets are known. The recipient must know about all
1376 # changesets are known. The recipient must know about all
1377 # changesets required to reach the known heads from the null
1377 # changesets required to reach the known heads from the null
1378 # changeset.
1378 # changeset.
1379 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1379 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1380 junk = None
1380 junk = None
1381 # Transform the list into an ersatz set.
1381 # Transform the list into an ersatz set.
1382 has_cl_set = dict.fromkeys(has_cl_set)
1382 has_cl_set = dict.fromkeys(has_cl_set)
1383 else:
1383 else:
1384 # If there were no known heads, the recipient cannot be assumed to
1384 # If there were no known heads, the recipient cannot be assumed to
1385 # know about any changesets.
1385 # know about any changesets.
1386 has_cl_set = {}
1386 has_cl_set = {}
1387
1387
1388 # Make it easy to refer to self.manifest
1388 # Make it easy to refer to self.manifest
1389 mnfst = self.manifest
1389 mnfst = self.manifest
1390 # We don't know which manifests are missing yet
1390 # We don't know which manifests are missing yet
1391 msng_mnfst_set = {}
1391 msng_mnfst_set = {}
1392 # Nor do we know which filenodes are missing.
1392 # Nor do we know which filenodes are missing.
1393 msng_filenode_set = {}
1393 msng_filenode_set = {}
1394
1394
1395 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1395 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1396 junk = None
1396 junk = None
1397
1397
1398 # A changeset always belongs to itself, so the changenode lookup
1398 # A changeset always belongs to itself, so the changenode lookup
1399 # function for a changenode is identity.
1399 # function for a changenode is identity.
1400 def identity(x):
1400 def identity(x):
1401 return x
1401 return x
1402
1402
1403 # A function generating function. Sets up an environment for the
1403 # A function generating function. Sets up an environment for the
1404 # inner function.
1404 # inner function.
1405 def cmp_by_rev_func(revlog):
1405 def cmp_by_rev_func(revlog):
1406 # Compare two nodes by their revision number in the environment's
1406 # Compare two nodes by their revision number in the environment's
1407 # revision history. Since the revision number both represents the
1407 # revision history. Since the revision number both represents the
1408 # most efficient order to read the nodes in, and represents a
1408 # most efficient order to read the nodes in, and represents a
1409 # topological sorting of the nodes, this function is often useful.
1409 # topological sorting of the nodes, this function is often useful.
1410 def cmp_by_rev(a, b):
1410 def cmp_by_rev(a, b):
1411 return cmp(revlog.rev(a), revlog.rev(b))
1411 return cmp(revlog.rev(a), revlog.rev(b))
1412 return cmp_by_rev
1412 return cmp_by_rev
1413
1413
1414 # If we determine that a particular file or manifest node must be a
1414 # If we determine that a particular file or manifest node must be a
1415 # node that the recipient of the changegroup will already have, we can
1415 # node that the recipient of the changegroup will already have, we can
1416 # also assume the recipient will have all the parents. This function
1416 # also assume the recipient will have all the parents. This function
1417 # prunes them from the set of missing nodes.
1417 # prunes them from the set of missing nodes.
1418 def prune_parents(revlog, hasset, msngset):
1418 def prune_parents(revlog, hasset, msngset):
1419 haslst = hasset.keys()
1419 haslst = hasset.keys()
1420 haslst.sort(cmp_by_rev_func(revlog))
1420 haslst.sort(cmp_by_rev_func(revlog))
1421 for node in haslst:
1421 for node in haslst:
1422 parentlst = [p for p in revlog.parents(node) if p != nullid]
1422 parentlst = [p for p in revlog.parents(node) if p != nullid]
1423 while parentlst:
1423 while parentlst:
1424 n = parentlst.pop()
1424 n = parentlst.pop()
1425 if n not in hasset:
1425 if n not in hasset:
1426 hasset[n] = 1
1426 hasset[n] = 1
1427 p = [p for p in revlog.parents(n) if p != nullid]
1427 p = [p for p in revlog.parents(n) if p != nullid]
1428 parentlst.extend(p)
1428 parentlst.extend(p)
1429 for n in hasset:
1429 for n in hasset:
1430 msngset.pop(n, None)
1430 msngset.pop(n, None)
1431
1431
1432 # This is a function generating function used to set up an environment
1432 # This is a function generating function used to set up an environment
1433 # for the inner function to execute in.
1433 # for the inner function to execute in.
1434 def manifest_and_file_collector(changedfileset):
1434 def manifest_and_file_collector(changedfileset):
1435 # This is an information gathering function that gathers
1435 # This is an information gathering function that gathers
1436 # information from each changeset node that goes out as part of
1436 # information from each changeset node that goes out as part of
1437 # the changegroup. The information gathered is a list of which
1437 # the changegroup. The information gathered is a list of which
1438 # manifest nodes are potentially required (the recipient may
1438 # manifest nodes are potentially required (the recipient may
1439 # already have them) and total list of all files which were
1439 # already have them) and total list of all files which were
1440 # changed in any changeset in the changegroup.
1440 # changed in any changeset in the changegroup.
1441 #
1441 #
1442 # We also remember the first changenode we saw any manifest
1442 # We also remember the first changenode we saw any manifest
1443 # referenced by so we can later determine which changenode 'owns'
1443 # referenced by so we can later determine which changenode 'owns'
1444 # the manifest.
1444 # the manifest.
1445 def collect_manifests_and_files(clnode):
1445 def collect_manifests_and_files(clnode):
1446 c = cl.read(clnode)
1446 c = cl.read(clnode)
1447 for f in c[3]:
1447 for f in c[3]:
1448 # This is to make sure we only have one instance of each
1448 # This is to make sure we only have one instance of each
1449 # filename string for each filename.
1449 # filename string for each filename.
1450 changedfileset.setdefault(f, f)
1450 changedfileset.setdefault(f, f)
1451 msng_mnfst_set.setdefault(c[0], clnode)
1451 msng_mnfst_set.setdefault(c[0], clnode)
1452 return collect_manifests_and_files
1452 return collect_manifests_and_files
1453
1453
1454 # Figure out which manifest nodes (of the ones we think might be part
1454 # Figure out which manifest nodes (of the ones we think might be part
1455 # of the changegroup) the recipient must know about and remove them
1455 # of the changegroup) the recipient must know about and remove them
1456 # from the changegroup.
1456 # from the changegroup.
1457 def prune_manifests():
1457 def prune_manifests():
1458 has_mnfst_set = {}
1458 has_mnfst_set = {}
1459 for n in msng_mnfst_set:
1459 for n in msng_mnfst_set:
1460 # If a 'missing' manifest thinks it belongs to a changenode
1460 # If a 'missing' manifest thinks it belongs to a changenode
1461 # the recipient is assumed to have, obviously the recipient
1461 # the recipient is assumed to have, obviously the recipient
1462 # must have that manifest.
1462 # must have that manifest.
1463 linknode = cl.node(mnfst.linkrev(n))
1463 linknode = cl.node(mnfst.linkrev(n))
1464 if linknode in has_cl_set:
1464 if linknode in has_cl_set:
1465 has_mnfst_set[n] = 1
1465 has_mnfst_set[n] = 1
1466 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1466 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1467
1467
1468 # Use the information collected in collect_manifests_and_files to say
1468 # Use the information collected in collect_manifests_and_files to say
1469 # which changenode any manifestnode belongs to.
1469 # which changenode any manifestnode belongs to.
1470 def lookup_manifest_link(mnfstnode):
1470 def lookup_manifest_link(mnfstnode):
1471 return msng_mnfst_set[mnfstnode]
1471 return msng_mnfst_set[mnfstnode]
1472
1472
1473 # A function generating function that sets up the initial environment
1473 # A function generating function that sets up the initial environment
1474 # the inner function.
1474 # the inner function.
1475 def filenode_collector(changedfiles):
1475 def filenode_collector(changedfiles):
1476 next_rev = [0]
1476 next_rev = [0]
1477 # This gathers information from each manifestnode included in the
1477 # This gathers information from each manifestnode included in the
1478 # changegroup about which filenodes the manifest node references
1478 # changegroup about which filenodes the manifest node references
1479 # so we can include those in the changegroup too.
1479 # so we can include those in the changegroup too.
1480 #
1480 #
1481 # It also remembers which changenode each filenode belongs to. It
1481 # It also remembers which changenode each filenode belongs to. It
1482 # does this by assuming the a filenode belongs to the changenode
1482 # does this by assuming the a filenode belongs to the changenode
1483 # the first manifest that references it belongs to.
1483 # the first manifest that references it belongs to.
1484 def collect_msng_filenodes(mnfstnode):
1484 def collect_msng_filenodes(mnfstnode):
1485 r = mnfst.rev(mnfstnode)
1485 r = mnfst.rev(mnfstnode)
1486 if r == next_rev[0]:
1486 if r == next_rev[0]:
1487 # If the last rev we looked at was the one just previous,
1487 # If the last rev we looked at was the one just previous,
1488 # we only need to see a diff.
1488 # we only need to see a diff.
1489 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1489 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1490 # For each line in the delta
1490 # For each line in the delta
1491 for dline in delta.splitlines():
1491 for dline in delta.splitlines():
1492 # get the filename and filenode for that line
1492 # get the filename and filenode for that line
1493 f, fnode = dline.split('\0')
1493 f, fnode = dline.split('\0')
1494 fnode = bin(fnode[:40])
1494 fnode = bin(fnode[:40])
1495 f = changedfiles.get(f, None)
1495 f = changedfiles.get(f, None)
1496 # And if the file is in the list of files we care
1496 # And if the file is in the list of files we care
1497 # about.
1497 # about.
1498 if f is not None:
1498 if f is not None:
1499 # Get the changenode this manifest belongs to
1499 # Get the changenode this manifest belongs to
1500 clnode = msng_mnfst_set[mnfstnode]
1500 clnode = msng_mnfst_set[mnfstnode]
1501 # Create the set of filenodes for the file if
1501 # Create the set of filenodes for the file if
1502 # there isn't one already.
1502 # there isn't one already.
1503 ndset = msng_filenode_set.setdefault(f, {})
1503 ndset = msng_filenode_set.setdefault(f, {})
1504 # And set the filenode's changelog node to the
1504 # And set the filenode's changelog node to the
1505 # manifest's if it hasn't been set already.
1505 # manifest's if it hasn't been set already.
1506 ndset.setdefault(fnode, clnode)
1506 ndset.setdefault(fnode, clnode)
1507 else:
1507 else:
1508 # Otherwise we need a full manifest.
1508 # Otherwise we need a full manifest.
1509 m = mnfst.read(mnfstnode)
1509 m = mnfst.read(mnfstnode)
1510 # For every file in we care about.
1510 # For every file in we care about.
1511 for f in changedfiles:
1511 for f in changedfiles:
1512 fnode = m.get(f, None)
1512 fnode = m.get(f, None)
1513 # If it's in the manifest
1513 # If it's in the manifest
1514 if fnode is not None:
1514 if fnode is not None:
1515 # See comments above.
1515 # See comments above.
1516 clnode = msng_mnfst_set[mnfstnode]
1516 clnode = msng_mnfst_set[mnfstnode]
1517 ndset = msng_filenode_set.setdefault(f, {})
1517 ndset = msng_filenode_set.setdefault(f, {})
1518 ndset.setdefault(fnode, clnode)
1518 ndset.setdefault(fnode, clnode)
1519 # Remember the revision we hope to see next.
1519 # Remember the revision we hope to see next.
1520 next_rev[0] = r + 1
1520 next_rev[0] = r + 1
1521 return collect_msng_filenodes
1521 return collect_msng_filenodes
1522
1522
1523 # We have a list of filenodes we think we need for a file, lets remove
1523 # We have a list of filenodes we think we need for a file, lets remove
1524 # all those we now the recipient must have.
1524 # all those we now the recipient must have.
1525 def prune_filenodes(f, filerevlog):
1525 def prune_filenodes(f, filerevlog):
1526 msngset = msng_filenode_set[f]
1526 msngset = msng_filenode_set[f]
1527 hasset = {}
1527 hasset = {}
1528 # If a 'missing' filenode thinks it belongs to a changenode we
1528 # If a 'missing' filenode thinks it belongs to a changenode we
1529 # assume the recipient must have, then the recipient must have
1529 # assume the recipient must have, then the recipient must have
1530 # that filenode.
1530 # that filenode.
1531 for n in msngset:
1531 for n in msngset:
1532 clnode = cl.node(filerevlog.linkrev(n))
1532 clnode = cl.node(filerevlog.linkrev(n))
1533 if clnode in has_cl_set:
1533 if clnode in has_cl_set:
1534 hasset[n] = 1
1534 hasset[n] = 1
1535 prune_parents(filerevlog, hasset, msngset)
1535 prune_parents(filerevlog, hasset, msngset)
1536
1536
1537 # A function generator function that sets up the a context for the
1537 # A function generator function that sets up the a context for the
1538 # inner function.
1538 # inner function.
1539 def lookup_filenode_link_func(fname):
1539 def lookup_filenode_link_func(fname):
1540 msngset = msng_filenode_set[fname]
1540 msngset = msng_filenode_set[fname]
1541 # Lookup the changenode the filenode belongs to.
1541 # Lookup the changenode the filenode belongs to.
1542 def lookup_filenode_link(fnode):
1542 def lookup_filenode_link(fnode):
1543 return msngset[fnode]
1543 return msngset[fnode]
1544 return lookup_filenode_link
1544 return lookup_filenode_link
1545
1545
1546 # Now that we have all theses utility functions to help out and
1546 # Now that we have all theses utility functions to help out and
1547 # logically divide up the task, generate the group.
1547 # logically divide up the task, generate the group.
1548 def gengroup():
1548 def gengroup():
1549 # The set of changed files starts empty.
1549 # The set of changed files starts empty.
1550 changedfiles = {}
1550 changedfiles = {}
1551 # Create a changenode group generator that will call our functions
1551 # Create a changenode group generator that will call our functions
1552 # back to lookup the owning changenode and collect information.
1552 # back to lookup the owning changenode and collect information.
1553 group = cl.group(msng_cl_lst, identity,
1553 group = cl.group(msng_cl_lst, identity,
1554 manifest_and_file_collector(changedfiles))
1554 manifest_and_file_collector(changedfiles))
1555 for chnk in group:
1555 for chnk in group:
1556 yield chnk
1556 yield chnk
1557
1557
1558 # The list of manifests has been collected by the generator
1558 # The list of manifests has been collected by the generator
1559 # calling our functions back.
1559 # calling our functions back.
1560 prune_manifests()
1560 prune_manifests()
1561 msng_mnfst_lst = msng_mnfst_set.keys()
1561 msng_mnfst_lst = msng_mnfst_set.keys()
1562 # Sort the manifestnodes by revision number.
1562 # Sort the manifestnodes by revision number.
1563 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1563 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1564 # Create a generator for the manifestnodes that calls our lookup
1564 # Create a generator for the manifestnodes that calls our lookup
1565 # and data collection functions back.
1565 # and data collection functions back.
1566 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1566 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1567 filenode_collector(changedfiles))
1567 filenode_collector(changedfiles))
1568 for chnk in group:
1568 for chnk in group:
1569 yield chnk
1569 yield chnk
1570
1570
1571 # These are no longer needed, dereference and toss the memory for
1571 # These are no longer needed, dereference and toss the memory for
1572 # them.
1572 # them.
1573 msng_mnfst_lst = None
1573 msng_mnfst_lst = None
1574 msng_mnfst_set.clear()
1574 msng_mnfst_set.clear()
1575
1575
1576 changedfiles = changedfiles.keys()
1576 changedfiles = changedfiles.keys()
1577 changedfiles.sort()
1577 changedfiles.sort()
1578 # Go through all our files in order sorted by name.
1578 # Go through all our files in order sorted by name.
1579 for fname in changedfiles:
1579 for fname in changedfiles:
1580 filerevlog = self.file(fname)
1580 filerevlog = self.file(fname)
1581 # Toss out the filenodes that the recipient isn't really
1581 # Toss out the filenodes that the recipient isn't really
1582 # missing.
1582 # missing.
1583 if msng_filenode_set.has_key(fname):
1583 if msng_filenode_set.has_key(fname):
1584 prune_filenodes(fname, filerevlog)
1584 prune_filenodes(fname, filerevlog)
1585 msng_filenode_lst = msng_filenode_set[fname].keys()
1585 msng_filenode_lst = msng_filenode_set[fname].keys()
1586 else:
1586 else:
1587 msng_filenode_lst = []
1587 msng_filenode_lst = []
1588 # If any filenodes are left, generate the group for them,
1588 # If any filenodes are left, generate the group for them,
1589 # otherwise don't bother.
1589 # otherwise don't bother.
1590 if len(msng_filenode_lst) > 0:
1590 if len(msng_filenode_lst) > 0:
1591 yield changegroup.genchunk(fname)
1591 yield changegroup.genchunk(fname)
1592 # Sort the filenodes by their revision #
1592 # Sort the filenodes by their revision #
1593 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1593 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1594 # Create a group generator and only pass in a changenode
1594 # Create a group generator and only pass in a changenode
1595 # lookup function as we need to collect no information
1595 # lookup function as we need to collect no information
1596 # from filenodes.
1596 # from filenodes.
1597 group = filerevlog.group(msng_filenode_lst,
1597 group = filerevlog.group(msng_filenode_lst,
1598 lookup_filenode_link_func(fname))
1598 lookup_filenode_link_func(fname))
1599 for chnk in group:
1599 for chnk in group:
1600 yield chnk
1600 yield chnk
1601 if msng_filenode_set.has_key(fname):
1601 if msng_filenode_set.has_key(fname):
1602 # Don't need this anymore, toss it to free memory.
1602 # Don't need this anymore, toss it to free memory.
1603 del msng_filenode_set[fname]
1603 del msng_filenode_set[fname]
1604 # Signal that no more groups are left.
1604 # Signal that no more groups are left.
1605 yield changegroup.closechunk()
1605 yield changegroup.closechunk()
1606
1606
1607 if msng_cl_lst:
1607 if msng_cl_lst:
1608 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1608 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1609
1609
1610 return util.chunkbuffer(gengroup())
1610 return util.chunkbuffer(gengroup())
1611
1611
1612 def changegroup(self, basenodes, source):
1612 def changegroup(self, basenodes, source):
1613 """Generate a changegroup of all nodes that we have that a recipient
1613 """Generate a changegroup of all nodes that we have that a recipient
1614 doesn't.
1614 doesn't.
1615
1615
1616 This is much easier than the previous function as we can assume that
1616 This is much easier than the previous function as we can assume that
1617 the recipient has any changenode we aren't sending them."""
1617 the recipient has any changenode we aren't sending them."""
1618
1618
1619 self.hook('preoutgoing', throw=True, source=source)
1619 self.hook('preoutgoing', throw=True, source=source)
1620
1620
1621 cl = self.changelog
1621 cl = self.changelog
1622 nodes = cl.nodesbetween(basenodes, None)[0]
1622 nodes = cl.nodesbetween(basenodes, None)[0]
1623 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1623 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1624 self.changegroupinfo(nodes)
1624 self.changegroupinfo(nodes)
1625
1625
1626 def identity(x):
1626 def identity(x):
1627 return x
1627 return x
1628
1628
1629 def gennodelst(revlog):
1629 def gennodelst(revlog):
1630 for r in xrange(0, revlog.count()):
1630 for r in xrange(0, revlog.count()):
1631 n = revlog.node(r)
1631 n = revlog.node(r)
1632 if revlog.linkrev(n) in revset:
1632 if revlog.linkrev(n) in revset:
1633 yield n
1633 yield n
1634
1634
1635 def changed_file_collector(changedfileset):
1635 def changed_file_collector(changedfileset):
1636 def collect_changed_files(clnode):
1636 def collect_changed_files(clnode):
1637 c = cl.read(clnode)
1637 c = cl.read(clnode)
1638 for fname in c[3]:
1638 for fname in c[3]:
1639 changedfileset[fname] = 1
1639 changedfileset[fname] = 1
1640 return collect_changed_files
1640 return collect_changed_files
1641
1641
1642 def lookuprevlink_func(revlog):
1642 def lookuprevlink_func(revlog):
1643 def lookuprevlink(n):
1643 def lookuprevlink(n):
1644 return cl.node(revlog.linkrev(n))
1644 return cl.node(revlog.linkrev(n))
1645 return lookuprevlink
1645 return lookuprevlink
1646
1646
1647 def gengroup():
1647 def gengroup():
1648 # construct a list of all changed files
1648 # construct a list of all changed files
1649 changedfiles = {}
1649 changedfiles = {}
1650
1650
1651 for chnk in cl.group(nodes, identity,
1651 for chnk in cl.group(nodes, identity,
1652 changed_file_collector(changedfiles)):
1652 changed_file_collector(changedfiles)):
1653 yield chnk
1653 yield chnk
1654 changedfiles = changedfiles.keys()
1654 changedfiles = changedfiles.keys()
1655 changedfiles.sort()
1655 changedfiles.sort()
1656
1656
1657 mnfst = self.manifest
1657 mnfst = self.manifest
1658 nodeiter = gennodelst(mnfst)
1658 nodeiter = gennodelst(mnfst)
1659 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1659 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1660 yield chnk
1660 yield chnk
1661
1661
1662 for fname in changedfiles:
1662 for fname in changedfiles:
1663 filerevlog = self.file(fname)
1663 filerevlog = self.file(fname)
1664 nodeiter = gennodelst(filerevlog)
1664 nodeiter = gennodelst(filerevlog)
1665 nodeiter = list(nodeiter)
1665 nodeiter = list(nodeiter)
1666 if nodeiter:
1666 if nodeiter:
1667 yield changegroup.genchunk(fname)
1667 yield changegroup.genchunk(fname)
1668 lookup = lookuprevlink_func(filerevlog)
1668 lookup = lookuprevlink_func(filerevlog)
1669 for chnk in filerevlog.group(nodeiter, lookup):
1669 for chnk in filerevlog.group(nodeiter, lookup):
1670 yield chnk
1670 yield chnk
1671
1671
1672 yield changegroup.closechunk()
1672 yield changegroup.closechunk()
1673
1673
1674 if nodes:
1674 if nodes:
1675 self.hook('outgoing', node=hex(nodes[0]), source=source)
1675 self.hook('outgoing', node=hex(nodes[0]), source=source)
1676
1676
1677 return util.chunkbuffer(gengroup())
1677 return util.chunkbuffer(gengroup())
1678
1678
1679 def addchangegroup(self, source, srctype, url):
1679 def addchangegroup(self, source, srctype, url):
1680 """add changegroup to repo.
1680 """add changegroup to repo.
1681
1681
1682 return values:
1682 return values:
1683 - nothing changed or no source: 0
1683 - nothing changed or no source: 0
1684 - more heads than before: 1+added heads (2..n)
1684 - more heads than before: 1+added heads (2..n)
1685 - less heads than before: -1-removed heads (-2..-n)
1685 - less heads than before: -1-removed heads (-2..-n)
1686 - number of heads stays the same: 1
1686 - number of heads stays the same: 1
1687 """
1687 """
1688 def csmap(x):
1688 def csmap(x):
1689 self.ui.debug(_("add changeset %s\n") % short(x))
1689 self.ui.debug(_("add changeset %s\n") % short(x))
1690 return cl.count()
1690 return cl.count()
1691
1691
1692 def revmap(x):
1692 def revmap(x):
1693 return cl.rev(x)
1693 return cl.rev(x)
1694
1694
1695 if not source:
1695 if not source:
1696 return 0
1696 return 0
1697
1697
1698 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1698 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1699
1699
1700 changesets = files = revisions = 0
1700 changesets = files = revisions = 0
1701
1701
1702 tr = self.transaction()
1702 tr = self.transaction()
1703
1703
1704 # write changelog data to temp files so concurrent readers will not see
1704 # write changelog data to temp files so concurrent readers will not see
1705 # inconsistent view
1705 # inconsistent view
1706 cl = None
1706 cl = None
1707 try:
1707 try:
1708 cl = appendfile.appendchangelog(self.sopener,
1708 cl = appendfile.appendchangelog(self.sopener,
1709 self.changelog.version)
1709 self.changelog.version)
1710
1710
1711 oldheads = len(cl.heads())
1711 oldheads = len(cl.heads())
1712
1712
1713 # pull off the changeset group
1713 # pull off the changeset group
1714 self.ui.status(_("adding changesets\n"))
1714 self.ui.status(_("adding changesets\n"))
1715 cor = cl.count() - 1
1715 cor = cl.count() - 1
1716 chunkiter = changegroup.chunkiter(source)
1716 chunkiter = changegroup.chunkiter(source)
1717 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1717 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1718 raise util.Abort(_("received changelog group is empty"))
1718 raise util.Abort(_("received changelog group is empty"))
1719 cnr = cl.count() - 1
1719 cnr = cl.count() - 1
1720 changesets = cnr - cor
1720 changesets = cnr - cor
1721
1721
1722 # pull off the manifest group
1722 # pull off the manifest group
1723 self.ui.status(_("adding manifests\n"))
1723 self.ui.status(_("adding manifests\n"))
1724 chunkiter = changegroup.chunkiter(source)
1724 chunkiter = changegroup.chunkiter(source)
1725 # no need to check for empty manifest group here:
1725 # no need to check for empty manifest group here:
1726 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1726 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1727 # no new manifest will be created and the manifest group will
1727 # no new manifest will be created and the manifest group will
1728 # be empty during the pull
1728 # be empty during the pull
1729 self.manifest.addgroup(chunkiter, revmap, tr)
1729 self.manifest.addgroup(chunkiter, revmap, tr)
1730
1730
1731 # process the files
1731 # process the files
1732 self.ui.status(_("adding file changes\n"))
1732 self.ui.status(_("adding file changes\n"))
1733 while 1:
1733 while 1:
1734 f = changegroup.getchunk(source)
1734 f = changegroup.getchunk(source)
1735 if not f:
1735 if not f:
1736 break
1736 break
1737 self.ui.debug(_("adding %s revisions\n") % f)
1737 self.ui.debug(_("adding %s revisions\n") % f)
1738 fl = self.file(f)
1738 fl = self.file(f)
1739 o = fl.count()
1739 o = fl.count()
1740 chunkiter = changegroup.chunkiter(source)
1740 chunkiter = changegroup.chunkiter(source)
1741 if fl.addgroup(chunkiter, revmap, tr) is None:
1741 if fl.addgroup(chunkiter, revmap, tr) is None:
1742 raise util.Abort(_("received file revlog group is empty"))
1742 raise util.Abort(_("received file revlog group is empty"))
1743 revisions += fl.count() - o
1743 revisions += fl.count() - o
1744 files += 1
1744 files += 1
1745
1745
1746 cl.writedata()
1746 cl.writedata()
1747 finally:
1747 finally:
1748 if cl:
1748 if cl:
1749 cl.cleanup()
1749 cl.cleanup()
1750
1750
1751 # make changelog see real files again
1751 # make changelog see real files again
1752 self.changelog = changelog.changelog(self.sopener,
1752 self.changelog = changelog.changelog(self.sopener,
1753 self.changelog.version)
1753 self.changelog.version)
1754 self.changelog.checkinlinesize(tr)
1754 self.changelog.checkinlinesize(tr)
1755
1755
1756 newheads = len(self.changelog.heads())
1756 newheads = len(self.changelog.heads())
1757 heads = ""
1757 heads = ""
1758 if oldheads and newheads != oldheads:
1758 if oldheads and newheads != oldheads:
1759 heads = _(" (%+d heads)") % (newheads - oldheads)
1759 heads = _(" (%+d heads)") % (newheads - oldheads)
1760
1760
1761 self.ui.status(_("added %d changesets"
1761 self.ui.status(_("added %d changesets"
1762 " with %d changes to %d files%s\n")
1762 " with %d changes to %d files%s\n")
1763 % (changesets, revisions, files, heads))
1763 % (changesets, revisions, files, heads))
1764
1764
1765 if changesets > 0:
1765 if changesets > 0:
1766 self.hook('pretxnchangegroup', throw=True,
1766 self.hook('pretxnchangegroup', throw=True,
1767 node=hex(self.changelog.node(cor+1)), source=srctype,
1767 node=hex(self.changelog.node(cor+1)), source=srctype,
1768 url=url)
1768 url=url)
1769
1769
1770 tr.close()
1770 tr.close()
1771
1771
1772 if changesets > 0:
1772 if changesets > 0:
1773 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1773 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1774 source=srctype, url=url)
1774 source=srctype, url=url)
1775
1775
1776 for i in xrange(cor + 1, cnr + 1):
1776 for i in xrange(cor + 1, cnr + 1):
1777 self.hook("incoming", node=hex(self.changelog.node(i)),
1777 self.hook("incoming", node=hex(self.changelog.node(i)),
1778 source=srctype, url=url)
1778 source=srctype, url=url)
1779
1779
1780 # never return 0 here:
1780 # never return 0 here:
1781 if newheads < oldheads:
1781 if newheads < oldheads:
1782 return newheads - oldheads - 1
1782 return newheads - oldheads - 1
1783 else:
1783 else:
1784 return newheads - oldheads + 1
1784 return newheads - oldheads + 1
1785
1785
1786
1786
1787 def stream_in(self, remote):
1787 def stream_in(self, remote):
1788 fp = remote.stream_out()
1788 fp = remote.stream_out()
1789 l = fp.readline()
1789 l = fp.readline()
1790 try:
1790 try:
1791 resp = int(l)
1791 resp = int(l)
1792 except ValueError:
1792 except ValueError:
1793 raise util.UnexpectedOutput(
1793 raise util.UnexpectedOutput(
1794 _('Unexpected response from remote server:'), l)
1794 _('Unexpected response from remote server:'), l)
1795 if resp == 1:
1795 if resp == 1:
1796 raise util.Abort(_('operation forbidden by server'))
1796 raise util.Abort(_('operation forbidden by server'))
1797 elif resp == 2:
1797 elif resp == 2:
1798 raise util.Abort(_('locking the remote repository failed'))
1798 raise util.Abort(_('locking the remote repository failed'))
1799 elif resp != 0:
1799 elif resp != 0:
1800 raise util.Abort(_('the server sent an unknown error code'))
1800 raise util.Abort(_('the server sent an unknown error code'))
1801 self.ui.status(_('streaming all changes\n'))
1801 self.ui.status(_('streaming all changes\n'))
1802 l = fp.readline()
1802 l = fp.readline()
1803 try:
1803 try:
1804 total_files, total_bytes = map(int, l.split(' ', 1))
1804 total_files, total_bytes = map(int, l.split(' ', 1))
1805 except ValueError, TypeError:
1805 except ValueError, TypeError:
1806 raise util.UnexpectedOutput(
1806 raise util.UnexpectedOutput(
1807 _('Unexpected response from remote server:'), l)
1807 _('Unexpected response from remote server:'), l)
1808 self.ui.status(_('%d files to transfer, %s of data\n') %
1808 self.ui.status(_('%d files to transfer, %s of data\n') %
1809 (total_files, util.bytecount(total_bytes)))
1809 (total_files, util.bytecount(total_bytes)))
1810 start = time.time()
1810 start = time.time()
1811 for i in xrange(total_files):
1811 for i in xrange(total_files):
1812 # XXX doesn't support '\n' or '\r' in filenames
1812 # XXX doesn't support '\n' or '\r' in filenames
1813 l = fp.readline()
1813 l = fp.readline()
1814 try:
1814 try:
1815 name, size = l.split('\0', 1)
1815 name, size = l.split('\0', 1)
1816 size = int(size)
1816 size = int(size)
1817 except ValueError, TypeError:
1817 except ValueError, TypeError:
1818 raise util.UnexpectedOutput(
1818 raise util.UnexpectedOutput(
1819 _('Unexpected response from remote server:'), l)
1819 _('Unexpected response from remote server:'), l)
1820 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1820 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1821 ofp = self.sopener(name, 'w')
1821 ofp = self.sopener(name, 'w')
1822 for chunk in util.filechunkiter(fp, limit=size):
1822 for chunk in util.filechunkiter(fp, limit=size):
1823 ofp.write(chunk)
1823 ofp.write(chunk)
1824 ofp.close()
1824 ofp.close()
1825 elapsed = time.time() - start
1825 elapsed = time.time() - start
1826 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1826 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1827 (util.bytecount(total_bytes), elapsed,
1827 (util.bytecount(total_bytes), elapsed,
1828 util.bytecount(total_bytes / elapsed)))
1828 util.bytecount(total_bytes / elapsed)))
1829 self.reload()
1829 self.reload()
1830 return len(self.heads()) + 1
1830 return len(self.heads()) + 1
1831
1831
1832 def clone(self, remote, heads=[], stream=False):
1832 def clone(self, remote, heads=[], stream=False):
1833 '''clone remote repository.
1833 '''clone remote repository.
1834
1834
1835 keyword arguments:
1835 keyword arguments:
1836 heads: list of revs to clone (forces use of pull)
1836 heads: list of revs to clone (forces use of pull)
1837 stream: use streaming clone if possible'''
1837 stream: use streaming clone if possible'''
1838
1838
1839 # now, all clients that can request uncompressed clones can
1839 # now, all clients that can request uncompressed clones can
1840 # read repo formats supported by all servers that can serve
1840 # read repo formats supported by all servers that can serve
1841 # them.
1841 # them.
1842
1842
1843 # if revlog format changes, client will have to check version
1843 # if revlog format changes, client will have to check version
1844 # and format flags on "stream" capability, and use
1844 # and format flags on "stream" capability, and use
1845 # uncompressed only if compatible.
1845 # uncompressed only if compatible.
1846
1846
1847 if stream and not heads and remote.capable('stream'):
1847 if stream and not heads and remote.capable('stream'):
1848 return self.stream_in(remote)
1848 return self.stream_in(remote)
1849 return self.pull(remote, heads)
1849 return self.pull(remote, heads)
1850
1850
1851 # used to avoid circular references so destructors work
1851 # used to avoid circular references so destructors work
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a closure that performs the given (src, dest) renames.

    The pairs are copied into plain tuples up front so the closure holds
    no reference back to the caller's structures.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for rename_src, rename_dst in pending:
            util.rename(rename_src, rename_dst)
    return run_renames
1858
1858
def instance(ui, path, create):
    """Open (optionally creating) the local repository at *path*.

    Any leading 'file:' scheme is stripped before the path is used.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1861
1861
def islocal(path):
    """Report whether *path* is local; for this repository class it
    always is, so the argument is ignored."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now