##// END OF EJS Templates
Fix removed file cornercase for CVS convert-repo
Matt Mackall -
r3955:497c6972 default
parent child Browse files
Show More
@@ -1,1863 +1,1865 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import _
9 from i18n import _
10 import repo, appendfile, changegroup
10 import repo, appendfile, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 import os, revlog, time, util
13 import os, revlog, time, util
14
14
15 class localrepository(repo.repository):
15 class localrepository(repo.repository):
16 capabilities = ('lookup', 'changegroupsubset')
16 capabilities = ('lookup', 'changegroupsubset')
17 supported = ('revlogv1', 'store')
17 supported = ('revlogv1', 'store')
18
18
19 def __del__(self):
19 def __del__(self):
20 self.transhandle = None
20 self.transhandle = None
21 def __init__(self, parentui, path=None, create=0):
21 def __init__(self, parentui, path=None, create=0):
22 repo.repository.__init__(self)
22 repo.repository.__init__(self)
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("There is no Mercurial repository"
29 raise repo.RepoError(_("There is no Mercurial repository"
30 " here (.hg not found)"))
30 " here (.hg not found)"))
31 path = p
31 path = p
32
32
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34 self.root = os.path.realpath(path)
34 self.root = os.path.realpath(path)
35 self.origroot = path
35 self.origroot = path
36 self.opener = util.opener(self.path)
36 self.opener = util.opener(self.path)
37 self.wopener = util.opener(self.root)
37 self.wopener = util.opener(self.root)
38
38
39 if not os.path.isdir(self.path):
39 if not os.path.isdir(self.path):
40 if create:
40 if create:
41 if not os.path.exists(path):
41 if not os.path.exists(path):
42 os.mkdir(path)
42 os.mkdir(path)
43 os.mkdir(self.path)
43 os.mkdir(self.path)
44 os.mkdir(os.path.join(self.path, "store"))
44 os.mkdir(os.path.join(self.path, "store"))
45 requirements = ("revlogv1", "store")
45 requirements = ("revlogv1", "store")
46 reqfile = self.opener("requires", "w")
46 reqfile = self.opener("requires", "w")
47 for r in requirements:
47 for r in requirements:
48 reqfile.write("%s\n" % r)
48 reqfile.write("%s\n" % r)
49 reqfile.close()
49 reqfile.close()
50 # create an invalid changelog
50 # create an invalid changelog
51 self.opener("00changelog.i", "a").write(
51 self.opener("00changelog.i", "a").write(
52 '\0\0\0\2' # represents revlogv2
52 '\0\0\0\2' # represents revlogv2
53 ' dummy changelog to prevent using the old repo layout'
53 ' dummy changelog to prevent using the old repo layout'
54 )
54 )
55 else:
55 else:
56 raise repo.RepoError(_("repository %s not found") % path)
56 raise repo.RepoError(_("repository %s not found") % path)
57 elif create:
57 elif create:
58 raise repo.RepoError(_("repository %s already exists") % path)
58 raise repo.RepoError(_("repository %s already exists") % path)
59 else:
59 else:
60 # find requirements
60 # find requirements
61 try:
61 try:
62 requirements = self.opener("requires").read().splitlines()
62 requirements = self.opener("requires").read().splitlines()
63 except IOError, inst:
63 except IOError, inst:
64 if inst.errno != errno.ENOENT:
64 if inst.errno != errno.ENOENT:
65 raise
65 raise
66 requirements = []
66 requirements = []
67 # check them
67 # check them
68 for r in requirements:
68 for r in requirements:
69 if r not in self.supported:
69 if r not in self.supported:
70 raise repo.RepoError(_("requirement '%s' not supported") % r)
70 raise repo.RepoError(_("requirement '%s' not supported") % r)
71
71
72 # setup store
72 # setup store
73 if "store" in requirements:
73 if "store" in requirements:
74 self.encodefn = util.encodefilename
74 self.encodefn = util.encodefilename
75 self.decodefn = util.decodefilename
75 self.decodefn = util.decodefilename
76 self.spath = os.path.join(self.path, "store")
76 self.spath = os.path.join(self.path, "store")
77 else:
77 else:
78 self.encodefn = lambda x: x
78 self.encodefn = lambda x: x
79 self.decodefn = lambda x: x
79 self.decodefn = lambda x: x
80 self.spath = self.path
80 self.spath = self.path
81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
82
82
83 self.ui = ui.ui(parentui=parentui)
83 self.ui = ui.ui(parentui=parentui)
84 try:
84 try:
85 self.ui.readconfig(self.join("hgrc"), self.root)
85 self.ui.readconfig(self.join("hgrc"), self.root)
86 except IOError:
86 except IOError:
87 pass
87 pass
88
88
89 v = self.ui.configrevlog()
89 v = self.ui.configrevlog()
90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
92 fl = v.get('flags', None)
92 fl = v.get('flags', None)
93 flags = 0
93 flags = 0
94 if fl != None:
94 if fl != None:
95 for x in fl.split():
95 for x in fl.split():
96 flags |= revlog.flagstr(x)
96 flags |= revlog.flagstr(x)
97 elif self.revlogv1:
97 elif self.revlogv1:
98 flags = revlog.REVLOG_DEFAULT_FLAGS
98 flags = revlog.REVLOG_DEFAULT_FLAGS
99
99
100 v = self.revlogversion | flags
100 v = self.revlogversion | flags
101 self.manifest = manifest.manifest(self.sopener, v)
101 self.manifest = manifest.manifest(self.sopener, v)
102 self.changelog = changelog.changelog(self.sopener, v)
102 self.changelog = changelog.changelog(self.sopener, v)
103
103
104 fallback = self.ui.config('ui', 'fallbackencoding')
104 fallback = self.ui.config('ui', 'fallbackencoding')
105 if fallback:
105 if fallback:
106 util._fallbackencoding = fallback
106 util._fallbackencoding = fallback
107
107
108 # the changelog might not have the inline index flag
108 # the changelog might not have the inline index flag
109 # on. If the format of the changelog is the same as found in
109 # on. If the format of the changelog is the same as found in
110 # .hgrc, apply any flags found in the .hgrc as well.
110 # .hgrc, apply any flags found in the .hgrc as well.
111 # Otherwise, just version from the changelog
111 # Otherwise, just version from the changelog
112 v = self.changelog.version
112 v = self.changelog.version
113 if v == self.revlogversion:
113 if v == self.revlogversion:
114 v |= flags
114 v |= flags
115 self.revlogversion = v
115 self.revlogversion = v
116
116
117 self.tagscache = None
117 self.tagscache = None
118 self.branchcache = None
118 self.branchcache = None
119 self.nodetagscache = None
119 self.nodetagscache = None
120 self.encodepats = None
120 self.encodepats = None
121 self.decodepats = None
121 self.decodepats = None
122 self.transhandle = None
122 self.transhandle = None
123
123
124 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
124 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
125
125
126 def url(self):
126 def url(self):
127 return 'file:' + self.root
127 return 'file:' + self.root
128
128
129 def hook(self, name, throw=False, **args):
129 def hook(self, name, throw=False, **args):
130 def callhook(hname, funcname):
130 def callhook(hname, funcname):
131 '''call python hook. hook is callable object, looked up as
131 '''call python hook. hook is callable object, looked up as
132 name in python module. if callable returns "true", hook
132 name in python module. if callable returns "true", hook
133 fails, else passes. if hook raises exception, treated as
133 fails, else passes. if hook raises exception, treated as
134 hook failure. exception propagates if throw is "true".
134 hook failure. exception propagates if throw is "true".
135
135
136 reason for "true" meaning "hook failed" is so that
136 reason for "true" meaning "hook failed" is so that
137 unmodified commands (e.g. mercurial.commands.update) can
137 unmodified commands (e.g. mercurial.commands.update) can
138 be run as hooks without wrappers to convert return values.'''
138 be run as hooks without wrappers to convert return values.'''
139
139
140 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
140 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
141 d = funcname.rfind('.')
141 d = funcname.rfind('.')
142 if d == -1:
142 if d == -1:
143 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
143 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
144 % (hname, funcname))
144 % (hname, funcname))
145 modname = funcname[:d]
145 modname = funcname[:d]
146 try:
146 try:
147 obj = __import__(modname)
147 obj = __import__(modname)
148 except ImportError:
148 except ImportError:
149 try:
149 try:
150 # extensions are loaded with hgext_ prefix
150 # extensions are loaded with hgext_ prefix
151 obj = __import__("hgext_%s" % modname)
151 obj = __import__("hgext_%s" % modname)
152 except ImportError:
152 except ImportError:
153 raise util.Abort(_('%s hook is invalid '
153 raise util.Abort(_('%s hook is invalid '
154 '(import of "%s" failed)') %
154 '(import of "%s" failed)') %
155 (hname, modname))
155 (hname, modname))
156 try:
156 try:
157 for p in funcname.split('.')[1:]:
157 for p in funcname.split('.')[1:]:
158 obj = getattr(obj, p)
158 obj = getattr(obj, p)
159 except AttributeError, err:
159 except AttributeError, err:
160 raise util.Abort(_('%s hook is invalid '
160 raise util.Abort(_('%s hook is invalid '
161 '("%s" is not defined)') %
161 '("%s" is not defined)') %
162 (hname, funcname))
162 (hname, funcname))
163 if not callable(obj):
163 if not callable(obj):
164 raise util.Abort(_('%s hook is invalid '
164 raise util.Abort(_('%s hook is invalid '
165 '("%s" is not callable)') %
165 '("%s" is not callable)') %
166 (hname, funcname))
166 (hname, funcname))
167 try:
167 try:
168 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
168 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
169 except (KeyboardInterrupt, util.SignalInterrupt):
169 except (KeyboardInterrupt, util.SignalInterrupt):
170 raise
170 raise
171 except Exception, exc:
171 except Exception, exc:
172 if isinstance(exc, util.Abort):
172 if isinstance(exc, util.Abort):
173 self.ui.warn(_('error: %s hook failed: %s\n') %
173 self.ui.warn(_('error: %s hook failed: %s\n') %
174 (hname, exc.args[0]))
174 (hname, exc.args[0]))
175 else:
175 else:
176 self.ui.warn(_('error: %s hook raised an exception: '
176 self.ui.warn(_('error: %s hook raised an exception: '
177 '%s\n') % (hname, exc))
177 '%s\n') % (hname, exc))
178 if throw:
178 if throw:
179 raise
179 raise
180 self.ui.print_exc()
180 self.ui.print_exc()
181 return True
181 return True
182 if r:
182 if r:
183 if throw:
183 if throw:
184 raise util.Abort(_('%s hook failed') % hname)
184 raise util.Abort(_('%s hook failed') % hname)
185 self.ui.warn(_('warning: %s hook failed\n') % hname)
185 self.ui.warn(_('warning: %s hook failed\n') % hname)
186 return r
186 return r
187
187
188 def runhook(name, cmd):
188 def runhook(name, cmd):
189 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
189 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
190 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
190 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
191 r = util.system(cmd, environ=env, cwd=self.root)
191 r = util.system(cmd, environ=env, cwd=self.root)
192 if r:
192 if r:
193 desc, r = util.explain_exit(r)
193 desc, r = util.explain_exit(r)
194 if throw:
194 if throw:
195 raise util.Abort(_('%s hook %s') % (name, desc))
195 raise util.Abort(_('%s hook %s') % (name, desc))
196 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
196 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
197 return r
197 return r
198
198
199 r = False
199 r = False
200 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
200 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
201 if hname.split(".", 1)[0] == name and cmd]
201 if hname.split(".", 1)[0] == name and cmd]
202 hooks.sort()
202 hooks.sort()
203 for hname, cmd in hooks:
203 for hname, cmd in hooks:
204 if cmd.startswith('python:'):
204 if cmd.startswith('python:'):
205 r = callhook(hname, cmd[7:].strip()) or r
205 r = callhook(hname, cmd[7:].strip()) or r
206 else:
206 else:
207 r = runhook(hname, cmd) or r
207 r = runhook(hname, cmd) or r
208 return r
208 return r
209
209
210 tag_disallowed = ':\r\n'
210 tag_disallowed = ':\r\n'
211
211
212 def tag(self, name, node, message, local, user, date):
212 def tag(self, name, node, message, local, user, date):
213 '''tag a revision with a symbolic name.
213 '''tag a revision with a symbolic name.
214
214
215 if local is True, the tag is stored in a per-repository file.
215 if local is True, the tag is stored in a per-repository file.
216 otherwise, it is stored in the .hgtags file, and a new
216 otherwise, it is stored in the .hgtags file, and a new
217 changeset is committed with the change.
217 changeset is committed with the change.
218
218
219 keyword arguments:
219 keyword arguments:
220
220
221 local: whether to store tag in non-version-controlled file
221 local: whether to store tag in non-version-controlled file
222 (default False)
222 (default False)
223
223
224 message: commit message to use if committing
224 message: commit message to use if committing
225
225
226 user: name of user to use if committing
226 user: name of user to use if committing
227
227
228 date: date tuple to use if committing'''
228 date: date tuple to use if committing'''
229
229
230 for c in self.tag_disallowed:
230 for c in self.tag_disallowed:
231 if c in name:
231 if c in name:
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233
233
234 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
234 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
235
235
236 if local:
236 if local:
237 # local tags are stored in the current charset
237 # local tags are stored in the current charset
238 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
238 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
239 self.hook('tag', node=hex(node), tag=name, local=local)
239 self.hook('tag', node=hex(node), tag=name, local=local)
240 return
240 return
241
241
242 for x in self.status()[:5]:
242 for x in self.status()[:5]:
243 if '.hgtags' in x:
243 if '.hgtags' in x:
244 raise util.Abort(_('working copy of .hgtags is changed '
244 raise util.Abort(_('working copy of .hgtags is changed '
245 '(please commit .hgtags manually)'))
245 '(please commit .hgtags manually)'))
246
246
247 # committed tags are stored in UTF-8
247 # committed tags are stored in UTF-8
248 line = '%s %s\n' % (hex(node), util.fromlocal(name))
248 line = '%s %s\n' % (hex(node), util.fromlocal(name))
249 self.wfile('.hgtags', 'ab').write(line)
249 self.wfile('.hgtags', 'ab').write(line)
250 if self.dirstate.state('.hgtags') == '?':
250 if self.dirstate.state('.hgtags') == '?':
251 self.add(['.hgtags'])
251 self.add(['.hgtags'])
252
252
253 self.commit(['.hgtags'], message, user, date)
253 self.commit(['.hgtags'], message, user, date)
254 self.hook('tag', node=hex(node), tag=name, local=local)
254 self.hook('tag', node=hex(node), tag=name, local=local)
255
255
256 def tags(self):
256 def tags(self):
257 '''return a mapping of tag to node'''
257 '''return a mapping of tag to node'''
258 if not self.tagscache:
258 if not self.tagscache:
259 self.tagscache = {}
259 self.tagscache = {}
260
260
261 def parsetag(line, context):
261 def parsetag(line, context):
262 if not line:
262 if not line:
263 return
263 return
264 s = l.split(" ", 1)
264 s = l.split(" ", 1)
265 if len(s) != 2:
265 if len(s) != 2:
266 self.ui.warn(_("%s: cannot parse entry\n") % context)
266 self.ui.warn(_("%s: cannot parse entry\n") % context)
267 return
267 return
268 node, key = s
268 node, key = s
269 key = util.tolocal(key.strip()) # stored in UTF-8
269 key = util.tolocal(key.strip()) # stored in UTF-8
270 try:
270 try:
271 bin_n = bin(node)
271 bin_n = bin(node)
272 except TypeError:
272 except TypeError:
273 self.ui.warn(_("%s: node '%s' is not well formed\n") %
273 self.ui.warn(_("%s: node '%s' is not well formed\n") %
274 (context, node))
274 (context, node))
275 return
275 return
276 if bin_n not in self.changelog.nodemap:
276 if bin_n not in self.changelog.nodemap:
277 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
277 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
278 (context, key))
278 (context, key))
279 return
279 return
280 self.tagscache[key] = bin_n
280 self.tagscache[key] = bin_n
281
281
282 # read the tags file from each head, ending with the tip,
282 # read the tags file from each head, ending with the tip,
283 # and add each tag found to the map, with "newer" ones
283 # and add each tag found to the map, with "newer" ones
284 # taking precedence
284 # taking precedence
285 f = None
285 f = None
286 for rev, node, fnode in self._hgtagsnodes():
286 for rev, node, fnode in self._hgtagsnodes():
287 f = (f and f.filectx(fnode) or
287 f = (f and f.filectx(fnode) or
288 self.filectx('.hgtags', fileid=fnode))
288 self.filectx('.hgtags', fileid=fnode))
289 count = 0
289 count = 0
290 for l in f.data().splitlines():
290 for l in f.data().splitlines():
291 count += 1
291 count += 1
292 parsetag(l, _("%s, line %d") % (str(f), count))
292 parsetag(l, _("%s, line %d") % (str(f), count))
293
293
294 try:
294 try:
295 f = self.opener("localtags")
295 f = self.opener("localtags")
296 count = 0
296 count = 0
297 for l in f:
297 for l in f:
298 # localtags are stored in the local character set
298 # localtags are stored in the local character set
299 # while the internal tag table is stored in UTF-8
299 # while the internal tag table is stored in UTF-8
300 l = util.fromlocal(l)
300 l = util.fromlocal(l)
301 count += 1
301 count += 1
302 parsetag(l, _("localtags, line %d") % count)
302 parsetag(l, _("localtags, line %d") % count)
303 except IOError:
303 except IOError:
304 pass
304 pass
305
305
306 self.tagscache['tip'] = self.changelog.tip()
306 self.tagscache['tip'] = self.changelog.tip()
307
307
308 return self.tagscache
308 return self.tagscache
309
309
310 def _hgtagsnodes(self):
310 def _hgtagsnodes(self):
311 heads = self.heads()
311 heads = self.heads()
312 heads.reverse()
312 heads.reverse()
313 last = {}
313 last = {}
314 ret = []
314 ret = []
315 for node in heads:
315 for node in heads:
316 c = self.changectx(node)
316 c = self.changectx(node)
317 rev = c.rev()
317 rev = c.rev()
318 try:
318 try:
319 fnode = c.filenode('.hgtags')
319 fnode = c.filenode('.hgtags')
320 except revlog.LookupError:
320 except revlog.LookupError:
321 continue
321 continue
322 ret.append((rev, node, fnode))
322 ret.append((rev, node, fnode))
323 if fnode in last:
323 if fnode in last:
324 ret[last[fnode]] = None
324 ret[last[fnode]] = None
325 last[fnode] = len(ret) - 1
325 last[fnode] = len(ret) - 1
326 return [item for item in ret if item]
326 return [item for item in ret if item]
327
327
328 def tagslist(self):
328 def tagslist(self):
329 '''return a list of tags ordered by revision'''
329 '''return a list of tags ordered by revision'''
330 l = []
330 l = []
331 for t, n in self.tags().items():
331 for t, n in self.tags().items():
332 try:
332 try:
333 r = self.changelog.rev(n)
333 r = self.changelog.rev(n)
334 except:
334 except:
335 r = -2 # sort to the beginning of the list if unknown
335 r = -2 # sort to the beginning of the list if unknown
336 l.append((r, t, n))
336 l.append((r, t, n))
337 l.sort()
337 l.sort()
338 return [(t, n) for r, t, n in l]
338 return [(t, n) for r, t, n in l]
339
339
340 def nodetags(self, node):
340 def nodetags(self, node):
341 '''return the tags associated with a node'''
341 '''return the tags associated with a node'''
342 if not self.nodetagscache:
342 if not self.nodetagscache:
343 self.nodetagscache = {}
343 self.nodetagscache = {}
344 for t, n in self.tags().items():
344 for t, n in self.tags().items():
345 self.nodetagscache.setdefault(n, []).append(t)
345 self.nodetagscache.setdefault(n, []).append(t)
346 return self.nodetagscache.get(node, [])
346 return self.nodetagscache.get(node, [])
347
347
348 def _branchtags(self):
348 def _branchtags(self):
349 partial, last, lrev = self._readbranchcache()
349 partial, last, lrev = self._readbranchcache()
350
350
351 tiprev = self.changelog.count() - 1
351 tiprev = self.changelog.count() - 1
352 if lrev != tiprev:
352 if lrev != tiprev:
353 self._updatebranchcache(partial, lrev+1, tiprev+1)
353 self._updatebranchcache(partial, lrev+1, tiprev+1)
354 self._writebranchcache(partial, self.changelog.tip(), tiprev)
354 self._writebranchcache(partial, self.changelog.tip(), tiprev)
355
355
356 return partial
356 return partial
357
357
358 def branchtags(self):
358 def branchtags(self):
359 if self.branchcache is not None:
359 if self.branchcache is not None:
360 return self.branchcache
360 return self.branchcache
361
361
362 self.branchcache = {} # avoid recursion in changectx
362 self.branchcache = {} # avoid recursion in changectx
363 partial = self._branchtags()
363 partial = self._branchtags()
364
364
365 # the branch cache is stored on disk as UTF-8, but in the local
365 # the branch cache is stored on disk as UTF-8, but in the local
366 # charset internally
366 # charset internally
367 for k, v in partial.items():
367 for k, v in partial.items():
368 self.branchcache[util.tolocal(k)] = v
368 self.branchcache[util.tolocal(k)] = v
369 return self.branchcache
369 return self.branchcache
370
370
371 def _readbranchcache(self):
371 def _readbranchcache(self):
372 partial = {}
372 partial = {}
373 try:
373 try:
374 f = self.opener("branches.cache")
374 f = self.opener("branches.cache")
375 lines = f.read().split('\n')
375 lines = f.read().split('\n')
376 f.close()
376 f.close()
377 last, lrev = lines.pop(0).rstrip().split(" ", 1)
377 last, lrev = lines.pop(0).rstrip().split(" ", 1)
378 last, lrev = bin(last), int(lrev)
378 last, lrev = bin(last), int(lrev)
379 if not (lrev < self.changelog.count() and
379 if not (lrev < self.changelog.count() and
380 self.changelog.node(lrev) == last): # sanity check
380 self.changelog.node(lrev) == last): # sanity check
381 # invalidate the cache
381 # invalidate the cache
382 raise ValueError('Invalid branch cache: unknown tip')
382 raise ValueError('Invalid branch cache: unknown tip')
383 for l in lines:
383 for l in lines:
384 if not l: continue
384 if not l: continue
385 node, label = l.rstrip().split(" ", 1)
385 node, label = l.rstrip().split(" ", 1)
386 partial[label] = bin(node)
386 partial[label] = bin(node)
387 except (KeyboardInterrupt, util.SignalInterrupt):
387 except (KeyboardInterrupt, util.SignalInterrupt):
388 raise
388 raise
389 except Exception, inst:
389 except Exception, inst:
390 if self.ui.debugflag:
390 if self.ui.debugflag:
391 self.ui.warn(str(inst), '\n')
391 self.ui.warn(str(inst), '\n')
392 partial, last, lrev = {}, nullid, nullrev
392 partial, last, lrev = {}, nullid, nullrev
393 return partial, last, lrev
393 return partial, last, lrev
394
394
395 def _writebranchcache(self, branches, tip, tiprev):
395 def _writebranchcache(self, branches, tip, tiprev):
396 try:
396 try:
397 f = self.opener("branches.cache", "w")
397 f = self.opener("branches.cache", "w")
398 f.write("%s %s\n" % (hex(tip), tiprev))
398 f.write("%s %s\n" % (hex(tip), tiprev))
399 for label, node in branches.iteritems():
399 for label, node in branches.iteritems():
400 f.write("%s %s\n" % (hex(node), label))
400 f.write("%s %s\n" % (hex(node), label))
401 except IOError:
401 except IOError:
402 pass
402 pass
403
403
404 def _updatebranchcache(self, partial, start, end):
404 def _updatebranchcache(self, partial, start, end):
405 for r in xrange(start, end):
405 for r in xrange(start, end):
406 c = self.changectx(r)
406 c = self.changectx(r)
407 b = c.branch()
407 b = c.branch()
408 if b:
408 if b:
409 partial[b] = c.node()
409 partial[b] = c.node()
410
410
411 def lookup(self, key):
411 def lookup(self, key):
412 if key == '.':
412 if key == '.':
413 key = self.dirstate.parents()[0]
413 key = self.dirstate.parents()[0]
414 if key == nullid:
414 if key == nullid:
415 raise repo.RepoError(_("no revision checked out"))
415 raise repo.RepoError(_("no revision checked out"))
416 elif key == 'null':
416 elif key == 'null':
417 return nullid
417 return nullid
418 n = self.changelog._match(key)
418 n = self.changelog._match(key)
419 if n:
419 if n:
420 return n
420 return n
421 if key in self.tags():
421 if key in self.tags():
422 return self.tags()[key]
422 return self.tags()[key]
423 if key in self.branchtags():
423 if key in self.branchtags():
424 return self.branchtags()[key]
424 return self.branchtags()[key]
425 n = self.changelog._partialmatch(key)
425 n = self.changelog._partialmatch(key)
426 if n:
426 if n:
427 return n
427 return n
428 raise repo.RepoError(_("unknown revision '%s'") % key)
428 raise repo.RepoError(_("unknown revision '%s'") % key)
429
429
430 def dev(self):
430 def dev(self):
431 return os.lstat(self.path).st_dev
431 return os.lstat(self.path).st_dev
432
432
433 def local(self):
433 def local(self):
434 return True
434 return True
435
435
436 def join(self, f):
436 def join(self, f):
437 return os.path.join(self.path, f)
437 return os.path.join(self.path, f)
438
438
439 def sjoin(self, f):
439 def sjoin(self, f):
440 f = self.encodefn(f)
440 f = self.encodefn(f)
441 return os.path.join(self.spath, f)
441 return os.path.join(self.spath, f)
442
442
443 def wjoin(self, f):
443 def wjoin(self, f):
444 return os.path.join(self.root, f)
444 return os.path.join(self.root, f)
445
445
446 def file(self, f):
446 def file(self, f):
447 if f[0] == '/':
447 if f[0] == '/':
448 f = f[1:]
448 f = f[1:]
449 return filelog.filelog(self.sopener, f, self.revlogversion)
449 return filelog.filelog(self.sopener, f, self.revlogversion)
450
450
451 def changectx(self, changeid=None):
451 def changectx(self, changeid=None):
452 return context.changectx(self, changeid)
452 return context.changectx(self, changeid)
453
453
454 def workingctx(self):
454 def workingctx(self):
455 return context.workingctx(self)
455 return context.workingctx(self)
456
456
457 def parents(self, changeid=None):
457 def parents(self, changeid=None):
458 '''
458 '''
459 get list of changectxs for parents of changeid or working directory
459 get list of changectxs for parents of changeid or working directory
460 '''
460 '''
461 if changeid is None:
461 if changeid is None:
462 pl = self.dirstate.parents()
462 pl = self.dirstate.parents()
463 else:
463 else:
464 n = self.changelog.lookup(changeid)
464 n = self.changelog.lookup(changeid)
465 pl = self.changelog.parents(n)
465 pl = self.changelog.parents(n)
466 if pl[1] == nullid:
466 if pl[1] == nullid:
467 return [self.changectx(pl[0])]
467 return [self.changectx(pl[0])]
468 return [self.changectx(pl[0]), self.changectx(pl[1])]
468 return [self.changectx(pl[0]), self.changectx(pl[1])]
469
469
470 def filectx(self, path, changeid=None, fileid=None):
470 def filectx(self, path, changeid=None, fileid=None):
471 """changeid can be a changeset revision, node, or tag.
471 """changeid can be a changeset revision, node, or tag.
472 fileid can be a file revision or node."""
472 fileid can be a file revision or node."""
473 return context.filectx(self, path, changeid, fileid)
473 return context.filectx(self, path, changeid, fileid)
474
474
475 def getcwd(self):
475 def getcwd(self):
476 return self.dirstate.getcwd()
476 return self.dirstate.getcwd()
477
477
478 def wfile(self, f, mode='r'):
478 def wfile(self, f, mode='r'):
479 return self.wopener(f, mode)
479 return self.wopener(f, mode)
480
480
481 def wread(self, filename):
481 def wread(self, filename):
482 if self.encodepats == None:
482 if self.encodepats == None:
483 l = []
483 l = []
484 for pat, cmd in self.ui.configitems("encode"):
484 for pat, cmd in self.ui.configitems("encode"):
485 mf = util.matcher(self.root, "", [pat], [], [])[1]
485 mf = util.matcher(self.root, "", [pat], [], [])[1]
486 l.append((mf, cmd))
486 l.append((mf, cmd))
487 self.encodepats = l
487 self.encodepats = l
488
488
489 data = self.wopener(filename, 'r').read()
489 data = self.wopener(filename, 'r').read()
490
490
491 for mf, cmd in self.encodepats:
491 for mf, cmd in self.encodepats:
492 if mf(filename):
492 if mf(filename):
493 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
493 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
494 data = util.filter(data, cmd)
494 data = util.filter(data, cmd)
495 break
495 break
496
496
497 return data
497 return data
498
498
499 def wwrite(self, filename, data, fd=None):
499 def wwrite(self, filename, data, fd=None):
500 if self.decodepats == None:
500 if self.decodepats == None:
501 l = []
501 l = []
502 for pat, cmd in self.ui.configitems("decode"):
502 for pat, cmd in self.ui.configitems("decode"):
503 mf = util.matcher(self.root, "", [pat], [], [])[1]
503 mf = util.matcher(self.root, "", [pat], [], [])[1]
504 l.append((mf, cmd))
504 l.append((mf, cmd))
505 self.decodepats = l
505 self.decodepats = l
506
506
507 for mf, cmd in self.decodepats:
507 for mf, cmd in self.decodepats:
508 if mf(filename):
508 if mf(filename):
509 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
509 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
510 data = util.filter(data, cmd)
510 data = util.filter(data, cmd)
511 break
511 break
512
512
513 if fd:
513 if fd:
514 return fd.write(data)
514 return fd.write(data)
515 return self.wopener(filename, 'w').write(data)
515 return self.wopener(filename, 'w').write(data)
516
516
517 def transaction(self):
517 def transaction(self):
518 tr = self.transhandle
518 tr = self.transhandle
519 if tr != None and tr.running():
519 if tr != None and tr.running():
520 return tr.nest()
520 return tr.nest()
521
521
522 # save dirstate for rollback
522 # save dirstate for rollback
523 try:
523 try:
524 ds = self.opener("dirstate").read()
524 ds = self.opener("dirstate").read()
525 except IOError:
525 except IOError:
526 ds = ""
526 ds = ""
527 self.opener("journal.dirstate", "w").write(ds)
527 self.opener("journal.dirstate", "w").write(ds)
528
528
529 renames = [(self.sjoin("journal"), self.sjoin("undo")),
529 renames = [(self.sjoin("journal"), self.sjoin("undo")),
530 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
530 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
531 tr = transaction.transaction(self.ui.warn, self.sopener,
531 tr = transaction.transaction(self.ui.warn, self.sopener,
532 self.sjoin("journal"),
532 self.sjoin("journal"),
533 aftertrans(renames))
533 aftertrans(renames))
534 self.transhandle = tr
534 self.transhandle = tr
535 return tr
535 return tr
536
536
537 def recover(self):
537 def recover(self):
538 l = self.lock()
538 l = self.lock()
539 if os.path.exists(self.sjoin("journal")):
539 if os.path.exists(self.sjoin("journal")):
540 self.ui.status(_("rolling back interrupted transaction\n"))
540 self.ui.status(_("rolling back interrupted transaction\n"))
541 transaction.rollback(self.sopener, self.sjoin("journal"))
541 transaction.rollback(self.sopener, self.sjoin("journal"))
542 self.reload()
542 self.reload()
543 return True
543 return True
544 else:
544 else:
545 self.ui.warn(_("no interrupted transaction available\n"))
545 self.ui.warn(_("no interrupted transaction available\n"))
546 return False
546 return False
547
547
548 def rollback(self, wlock=None):
548 def rollback(self, wlock=None):
549 if not wlock:
549 if not wlock:
550 wlock = self.wlock()
550 wlock = self.wlock()
551 l = self.lock()
551 l = self.lock()
552 if os.path.exists(self.sjoin("undo")):
552 if os.path.exists(self.sjoin("undo")):
553 self.ui.status(_("rolling back last transaction\n"))
553 self.ui.status(_("rolling back last transaction\n"))
554 transaction.rollback(self.sopener, self.sjoin("undo"))
554 transaction.rollback(self.sopener, self.sjoin("undo"))
555 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
555 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
556 self.reload()
556 self.reload()
557 self.wreload()
557 self.wreload()
558 else:
558 else:
559 self.ui.warn(_("no rollback information available\n"))
559 self.ui.warn(_("no rollback information available\n"))
560
560
561 def wreload(self):
561 def wreload(self):
562 self.dirstate.read()
562 self.dirstate.read()
563
563
564 def reload(self):
564 def reload(self):
565 self.changelog.load()
565 self.changelog.load()
566 self.manifest.load()
566 self.manifest.load()
567 self.tagscache = None
567 self.tagscache = None
568 self.nodetagscache = None
568 self.nodetagscache = None
569
569
570 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
570 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
571 desc=None):
571 desc=None):
572 try:
572 try:
573 l = lock.lock(lockname, 0, releasefn, desc=desc)
573 l = lock.lock(lockname, 0, releasefn, desc=desc)
574 except lock.LockHeld, inst:
574 except lock.LockHeld, inst:
575 if not wait:
575 if not wait:
576 raise
576 raise
577 self.ui.warn(_("waiting for lock on %s held by %r\n") %
577 self.ui.warn(_("waiting for lock on %s held by %r\n") %
578 (desc, inst.locker))
578 (desc, inst.locker))
579 # default to 600 seconds timeout
579 # default to 600 seconds timeout
580 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
580 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
581 releasefn, desc=desc)
581 releasefn, desc=desc)
582 if acquirefn:
582 if acquirefn:
583 acquirefn()
583 acquirefn()
584 return l
584 return l
585
585
586 def lock(self, wait=1):
586 def lock(self, wait=1):
587 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
587 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
588 desc=_('repository %s') % self.origroot)
588 desc=_('repository %s') % self.origroot)
589
589
590 def wlock(self, wait=1):
590 def wlock(self, wait=1):
591 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
591 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
592 self.wreload,
592 self.wreload,
593 desc=_('working directory of %s') % self.origroot)
593 desc=_('working directory of %s') % self.origroot)
594
594
595 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
595 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
596 """
596 """
597 commit an individual file as part of a larger transaction
597 commit an individual file as part of a larger transaction
598 """
598 """
599
599
600 t = self.wread(fn)
600 t = self.wread(fn)
601 fl = self.file(fn)
601 fl = self.file(fn)
602 fp1 = manifest1.get(fn, nullid)
602 fp1 = manifest1.get(fn, nullid)
603 fp2 = manifest2.get(fn, nullid)
603 fp2 = manifest2.get(fn, nullid)
604
604
605 meta = {}
605 meta = {}
606 cp = self.dirstate.copied(fn)
606 cp = self.dirstate.copied(fn)
607 if cp:
607 if cp:
608 meta["copy"] = cp
608 meta["copy"] = cp
609 if not manifest2: # not a branch merge
609 if not manifest2: # not a branch merge
610 meta["copyrev"] = hex(manifest1.get(cp, nullid))
610 meta["copyrev"] = hex(manifest1.get(cp, nullid))
611 fp2 = nullid
611 fp2 = nullid
612 elif fp2 != nullid: # copied on remote side
612 elif fp2 != nullid: # copied on remote side
613 meta["copyrev"] = hex(manifest1.get(cp, nullid))
613 meta["copyrev"] = hex(manifest1.get(cp, nullid))
614 elif fp1 != nullid: # copied on local side, reversed
614 elif fp1 != nullid: # copied on local side, reversed
615 meta["copyrev"] = hex(manifest2.get(cp))
615 meta["copyrev"] = hex(manifest2.get(cp))
616 fp2 = nullid
616 fp2 = nullid
617 else: # directory rename
617 else: # directory rename
618 meta["copyrev"] = hex(manifest1.get(cp, nullid))
618 meta["copyrev"] = hex(manifest1.get(cp, nullid))
619 self.ui.debug(_(" %s: copy %s:%s\n") %
619 self.ui.debug(_(" %s: copy %s:%s\n") %
620 (fn, cp, meta["copyrev"]))
620 (fn, cp, meta["copyrev"]))
621 fp1 = nullid
621 fp1 = nullid
622 elif fp2 != nullid:
622 elif fp2 != nullid:
623 # is one parent an ancestor of the other?
623 # is one parent an ancestor of the other?
624 fpa = fl.ancestor(fp1, fp2)
624 fpa = fl.ancestor(fp1, fp2)
625 if fpa == fp1:
625 if fpa == fp1:
626 fp1, fp2 = fp2, nullid
626 fp1, fp2 = fp2, nullid
627 elif fpa == fp2:
627 elif fpa == fp2:
628 fp2 = nullid
628 fp2 = nullid
629
629
630 # is the file unmodified from the parent? report existing entry
630 # is the file unmodified from the parent? report existing entry
631 if fp2 == nullid and not fl.cmp(fp1, t):
631 if fp2 == nullid and not fl.cmp(fp1, t):
632 return fp1
632 return fp1
633
633
634 changelist.append(fn)
634 changelist.append(fn)
635 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
635 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
636
636
637 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
637 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
638 if p1 is None:
638 if p1 is None:
639 p1, p2 = self.dirstate.parents()
639 p1, p2 = self.dirstate.parents()
640 return self.commit(files=files, text=text, user=user, date=date,
640 return self.commit(files=files, text=text, user=user, date=date,
641 p1=p1, p2=p2, wlock=wlock, extra=extra)
641 p1=p1, p2=p2, wlock=wlock, extra=extra)
642
642
643 def commit(self, files=None, text="", user=None, date=None,
643 def commit(self, files=None, text="", user=None, date=None,
644 match=util.always, force=False, lock=None, wlock=None,
644 match=util.always, force=False, lock=None, wlock=None,
645 force_editor=False, p1=None, p2=None, extra={}):
645 force_editor=False, p1=None, p2=None, extra={}):
646
646
647 commit = []
647 commit = []
648 remove = []
648 remove = []
649 changed = []
649 changed = []
650 use_dirstate = (p1 is None) # not rawcommit
650 use_dirstate = (p1 is None) # not rawcommit
651 extra = extra.copy()
651 extra = extra.copy()
652
652
653 if use_dirstate:
653 if use_dirstate:
654 if files:
654 if files:
655 for f in files:
655 for f in files:
656 s = self.dirstate.state(f)
656 s = self.dirstate.state(f)
657 if s in 'nmai':
657 if s in 'nmai':
658 commit.append(f)
658 commit.append(f)
659 elif s == 'r':
659 elif s == 'r':
660 remove.append(f)
660 remove.append(f)
661 else:
661 else:
662 self.ui.warn(_("%s not tracked!\n") % f)
662 self.ui.warn(_("%s not tracked!\n") % f)
663 else:
663 else:
664 changes = self.status(match=match)[:5]
664 changes = self.status(match=match)[:5]
665 modified, added, removed, deleted, unknown = changes
665 modified, added, removed, deleted, unknown = changes
666 commit = modified + added
666 commit = modified + added
667 remove = removed
667 remove = removed
668 else:
668 else:
669 commit = files
669 commit = files
670
670
671 if use_dirstate:
671 if use_dirstate:
672 p1, p2 = self.dirstate.parents()
672 p1, p2 = self.dirstate.parents()
673 update_dirstate = True
673 update_dirstate = True
674 else:
674 else:
675 p1, p2 = p1, p2 or nullid
675 p1, p2 = p1, p2 or nullid
676 update_dirstate = (self.dirstate.parents()[0] == p1)
676 update_dirstate = (self.dirstate.parents()[0] == p1)
677
677
678 c1 = self.changelog.read(p1)
678 c1 = self.changelog.read(p1)
679 c2 = self.changelog.read(p2)
679 c2 = self.changelog.read(p2)
680 m1 = self.manifest.read(c1[0]).copy()
680 m1 = self.manifest.read(c1[0]).copy()
681 m2 = self.manifest.read(c2[0])
681 m2 = self.manifest.read(c2[0])
682
682
683 if use_dirstate:
683 if use_dirstate:
684 branchname = self.workingctx().branch()
684 branchname = self.workingctx().branch()
685 try:
685 try:
686 branchname = branchname.decode('UTF-8').encode('UTF-8')
686 branchname = branchname.decode('UTF-8').encode('UTF-8')
687 except UnicodeDecodeError:
687 except UnicodeDecodeError:
688 raise util.Abort(_('branch name not in UTF-8!'))
688 raise util.Abort(_('branch name not in UTF-8!'))
689 else:
689 else:
690 branchname = ""
690 branchname = ""
691
691
692 if use_dirstate:
692 if use_dirstate:
693 oldname = c1[5].get("branch", "") # stored in UTF-8
693 oldname = c1[5].get("branch", "") # stored in UTF-8
694 if not commit and not remove and not force and p2 == nullid and \
694 if not commit and not remove and not force and p2 == nullid and \
695 branchname == oldname:
695 branchname == oldname:
696 self.ui.status(_("nothing changed\n"))
696 self.ui.status(_("nothing changed\n"))
697 return None
697 return None
698
698
699 xp1 = hex(p1)
699 xp1 = hex(p1)
700 if p2 == nullid: xp2 = ''
700 if p2 == nullid: xp2 = ''
701 else: xp2 = hex(p2)
701 else: xp2 = hex(p2)
702
702
703 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
703 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
704
704
705 if not wlock:
705 if not wlock:
706 wlock = self.wlock()
706 wlock = self.wlock()
707 if not lock:
707 if not lock:
708 lock = self.lock()
708 lock = self.lock()
709 tr = self.transaction()
709 tr = self.transaction()
710
710
711 # check in files
711 # check in files
712 new = {}
712 new = {}
713 linkrev = self.changelog.count()
713 linkrev = self.changelog.count()
714 commit.sort()
714 commit.sort()
715 for f in commit:
715 for f in commit:
716 self.ui.note(f + "\n")
716 self.ui.note(f + "\n")
717 try:
717 try:
718 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
718 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
719 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
719 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
720 except IOError:
720 except IOError:
721 if use_dirstate:
721 if use_dirstate:
722 self.ui.warn(_("trouble committing %s!\n") % f)
722 self.ui.warn(_("trouble committing %s!\n") % f)
723 raise
723 raise
724 else:
724 else:
725 remove.append(f)
725 remove.append(f)
726
726
727 # update manifest
727 # update manifest
728 m1.update(new)
728 m1.update(new)
729 remove.sort()
729 remove.sort()
730 removed = []
730
731
731 for f in remove:
732 for f in remove:
732 if f in m1:
733 if f in m1:
733 del m1[f]
734 del m1[f]
734 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
735 removed.append(f)
736 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
735
737
736 # add changeset
738 # add changeset
737 new = new.keys()
739 new = new.keys()
738 new.sort()
740 new.sort()
739
741
740 user = user or self.ui.username()
742 user = user or self.ui.username()
741 if not text or force_editor:
743 if not text or force_editor:
742 edittext = []
744 edittext = []
743 if text:
745 if text:
744 edittext.append(text)
746 edittext.append(text)
745 edittext.append("")
747 edittext.append("")
746 edittext.append("HG: user: %s" % user)
748 edittext.append("HG: user: %s" % user)
747 if p2 != nullid:
749 if p2 != nullid:
748 edittext.append("HG: branch merge")
750 edittext.append("HG: branch merge")
749 edittext.extend(["HG: changed %s" % f for f in changed])
751 edittext.extend(["HG: changed %s" % f for f in changed])
750 edittext.extend(["HG: removed %s" % f for f in remove])
752 edittext.extend(["HG: removed %s" % f for f in removed])
751 if not changed and not remove:
753 if not changed and not remove:
752 edittext.append("HG: no files changed")
754 edittext.append("HG: no files changed")
753 edittext.append("")
755 edittext.append("")
754 # run editor in the repository root
756 # run editor in the repository root
755 olddir = os.getcwd()
757 olddir = os.getcwd()
756 os.chdir(self.root)
758 os.chdir(self.root)
757 text = self.ui.edit("\n".join(edittext), user)
759 text = self.ui.edit("\n".join(edittext), user)
758 os.chdir(olddir)
760 os.chdir(olddir)
759
761
760 lines = [line.rstrip() for line in text.rstrip().splitlines()]
762 lines = [line.rstrip() for line in text.rstrip().splitlines()]
761 while lines and not lines[0]:
763 while lines and not lines[0]:
762 del lines[0]
764 del lines[0]
763 if not lines:
765 if not lines:
764 return None
766 return None
765 text = '\n'.join(lines)
767 text = '\n'.join(lines)
766 if branchname:
768 if branchname:
767 extra["branch"] = branchname
769 extra["branch"] = branchname
768 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
770 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
769 user, date, extra)
771 user, date, extra)
770 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
772 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
771 parent2=xp2)
773 parent2=xp2)
772 tr.close()
774 tr.close()
773
775
774 if use_dirstate or update_dirstate:
776 if use_dirstate or update_dirstate:
775 self.dirstate.setparents(n)
777 self.dirstate.setparents(n)
776 if use_dirstate:
778 if use_dirstate:
777 self.dirstate.update(new, "n")
779 self.dirstate.update(new, "n")
778 self.dirstate.forget(remove)
780 self.dirstate.forget(removed)
779
781
780 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
782 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
781 return n
783 return n
782
784
783 def walk(self, node=None, files=[], match=util.always, badmatch=None):
785 def walk(self, node=None, files=[], match=util.always, badmatch=None):
784 '''
786 '''
785 walk recursively through the directory tree or a given
787 walk recursively through the directory tree or a given
786 changeset, finding all files matched by the match
788 changeset, finding all files matched by the match
787 function
789 function
788
790
789 results are yielded in a tuple (src, filename), where src
791 results are yielded in a tuple (src, filename), where src
790 is one of:
792 is one of:
791 'f' the file was found in the directory tree
793 'f' the file was found in the directory tree
792 'm' the file was only in the dirstate and not in the tree
794 'm' the file was only in the dirstate and not in the tree
793 'b' file was not found and matched badmatch
795 'b' file was not found and matched badmatch
794 '''
796 '''
795
797
796 if node:
798 if node:
797 fdict = dict.fromkeys(files)
799 fdict = dict.fromkeys(files)
798 for fn in self.manifest.read(self.changelog.read(node)[0]):
800 for fn in self.manifest.read(self.changelog.read(node)[0]):
799 for ffn in fdict:
801 for ffn in fdict:
800 # match if the file is the exact name or a directory
802 # match if the file is the exact name or a directory
801 if ffn == fn or fn.startswith("%s/" % ffn):
803 if ffn == fn or fn.startswith("%s/" % ffn):
802 del fdict[ffn]
804 del fdict[ffn]
803 break
805 break
804 if match(fn):
806 if match(fn):
805 yield 'm', fn
807 yield 'm', fn
806 for fn in fdict:
808 for fn in fdict:
807 if badmatch and badmatch(fn):
809 if badmatch and badmatch(fn):
808 if match(fn):
810 if match(fn):
809 yield 'b', fn
811 yield 'b', fn
810 else:
812 else:
811 self.ui.warn(_('%s: No such file in rev %s\n') % (
813 self.ui.warn(_('%s: No such file in rev %s\n') % (
812 util.pathto(self.getcwd(), fn), short(node)))
814 util.pathto(self.getcwd(), fn), short(node)))
813 else:
815 else:
814 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
816 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
815 yield src, fn
817 yield src, fn
816
818
817 def status(self, node1=None, node2=None, files=[], match=util.always,
819 def status(self, node1=None, node2=None, files=[], match=util.always,
818 wlock=None, list_ignored=False, list_clean=False):
820 wlock=None, list_ignored=False, list_clean=False):
819 """return status of files between two nodes or node and working directory
821 """return status of files between two nodes or node and working directory
820
822
821 If node1 is None, use the first dirstate parent instead.
823 If node1 is None, use the first dirstate parent instead.
822 If node2 is None, compare node1 with working directory.
824 If node2 is None, compare node1 with working directory.
823 """
825 """
824
826
825 def fcmp(fn, mf):
827 def fcmp(fn, mf):
826 t1 = self.wread(fn)
828 t1 = self.wread(fn)
827 return self.file(fn).cmp(mf.get(fn, nullid), t1)
829 return self.file(fn).cmp(mf.get(fn, nullid), t1)
828
830
829 def mfmatches(node):
831 def mfmatches(node):
830 change = self.changelog.read(node)
832 change = self.changelog.read(node)
831 mf = self.manifest.read(change[0]).copy()
833 mf = self.manifest.read(change[0]).copy()
832 for fn in mf.keys():
834 for fn in mf.keys():
833 if not match(fn):
835 if not match(fn):
834 del mf[fn]
836 del mf[fn]
835 return mf
837 return mf
836
838
837 modified, added, removed, deleted, unknown = [], [], [], [], []
839 modified, added, removed, deleted, unknown = [], [], [], [], []
838 ignored, clean = [], []
840 ignored, clean = [], []
839
841
840 compareworking = False
842 compareworking = False
841 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
843 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
842 compareworking = True
844 compareworking = True
843
845
844 if not compareworking:
846 if not compareworking:
845 # read the manifest from node1 before the manifest from node2,
847 # read the manifest from node1 before the manifest from node2,
846 # so that we'll hit the manifest cache if we're going through
848 # so that we'll hit the manifest cache if we're going through
847 # all the revisions in parent->child order.
849 # all the revisions in parent->child order.
848 mf1 = mfmatches(node1)
850 mf1 = mfmatches(node1)
849
851
850 # are we comparing the working directory?
852 # are we comparing the working directory?
851 if not node2:
853 if not node2:
852 if not wlock:
854 if not wlock:
853 try:
855 try:
854 wlock = self.wlock(wait=0)
856 wlock = self.wlock(wait=0)
855 except lock.LockException:
857 except lock.LockException:
856 wlock = None
858 wlock = None
857 (lookup, modified, added, removed, deleted, unknown,
859 (lookup, modified, added, removed, deleted, unknown,
858 ignored, clean) = self.dirstate.status(files, match,
860 ignored, clean) = self.dirstate.status(files, match,
859 list_ignored, list_clean)
861 list_ignored, list_clean)
860
862
861 # are we comparing working dir against its parent?
863 # are we comparing working dir against its parent?
862 if compareworking:
864 if compareworking:
863 if lookup:
865 if lookup:
864 # do a full compare of any files that might have changed
866 # do a full compare of any files that might have changed
865 mf2 = mfmatches(self.dirstate.parents()[0])
867 mf2 = mfmatches(self.dirstate.parents()[0])
866 for f in lookup:
868 for f in lookup:
867 if fcmp(f, mf2):
869 if fcmp(f, mf2):
868 modified.append(f)
870 modified.append(f)
869 else:
871 else:
870 clean.append(f)
872 clean.append(f)
871 if wlock is not None:
873 if wlock is not None:
872 self.dirstate.update([f], "n")
874 self.dirstate.update([f], "n")
873 else:
875 else:
874 # we are comparing working dir against non-parent
876 # we are comparing working dir against non-parent
875 # generate a pseudo-manifest for the working dir
877 # generate a pseudo-manifest for the working dir
876 # XXX: create it in dirstate.py ?
878 # XXX: create it in dirstate.py ?
877 mf2 = mfmatches(self.dirstate.parents()[0])
879 mf2 = mfmatches(self.dirstate.parents()[0])
878 for f in lookup + modified + added:
880 for f in lookup + modified + added:
879 mf2[f] = ""
881 mf2[f] = ""
880 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
882 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
881 for f in removed:
883 for f in removed:
882 if f in mf2:
884 if f in mf2:
883 del mf2[f]
885 del mf2[f]
884 else:
886 else:
885 # we are comparing two revisions
887 # we are comparing two revisions
886 mf2 = mfmatches(node2)
888 mf2 = mfmatches(node2)
887
889
888 if not compareworking:
890 if not compareworking:
889 # flush lists from dirstate before comparing manifests
891 # flush lists from dirstate before comparing manifests
890 modified, added, clean = [], [], []
892 modified, added, clean = [], [], []
891
893
892 # make sure to sort the files so we talk to the disk in a
894 # make sure to sort the files so we talk to the disk in a
893 # reasonable order
895 # reasonable order
894 mf2keys = mf2.keys()
896 mf2keys = mf2.keys()
895 mf2keys.sort()
897 mf2keys.sort()
896 for fn in mf2keys:
898 for fn in mf2keys:
897 if mf1.has_key(fn):
899 if mf1.has_key(fn):
898 if mf1.flags(fn) != mf2.flags(fn) or \
900 if mf1.flags(fn) != mf2.flags(fn) or \
899 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
901 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
900 modified.append(fn)
902 modified.append(fn)
901 elif list_clean:
903 elif list_clean:
902 clean.append(fn)
904 clean.append(fn)
903 del mf1[fn]
905 del mf1[fn]
904 else:
906 else:
905 added.append(fn)
907 added.append(fn)
906
908
907 removed = mf1.keys()
909 removed = mf1.keys()
908
910
909 # sort and return results:
911 # sort and return results:
910 for l in modified, added, removed, deleted, unknown, ignored, clean:
912 for l in modified, added, removed, deleted, unknown, ignored, clean:
911 l.sort()
913 l.sort()
912 return (modified, added, removed, deleted, unknown, ignored, clean)
914 return (modified, added, removed, deleted, unknown, ignored, clean)
913
915
914 def add(self, list, wlock=None):
916 def add(self, list, wlock=None):
915 if not wlock:
917 if not wlock:
916 wlock = self.wlock()
918 wlock = self.wlock()
917 for f in list:
919 for f in list:
918 p = self.wjoin(f)
920 p = self.wjoin(f)
919 if not os.path.exists(p):
921 if not os.path.exists(p):
920 self.ui.warn(_("%s does not exist!\n") % f)
922 self.ui.warn(_("%s does not exist!\n") % f)
921 elif not os.path.isfile(p):
923 elif not os.path.isfile(p):
922 self.ui.warn(_("%s not added: only files supported currently\n")
924 self.ui.warn(_("%s not added: only files supported currently\n")
923 % f)
925 % f)
924 elif self.dirstate.state(f) in 'an':
926 elif self.dirstate.state(f) in 'an':
925 self.ui.warn(_("%s already tracked!\n") % f)
927 self.ui.warn(_("%s already tracked!\n") % f)
926 else:
928 else:
927 self.dirstate.update([f], "a")
929 self.dirstate.update([f], "a")
928
930
929 def forget(self, list, wlock=None):
931 def forget(self, list, wlock=None):
930 if not wlock:
932 if not wlock:
931 wlock = self.wlock()
933 wlock = self.wlock()
932 for f in list:
934 for f in list:
933 if self.dirstate.state(f) not in 'ai':
935 if self.dirstate.state(f) not in 'ai':
934 self.ui.warn(_("%s not added!\n") % f)
936 self.ui.warn(_("%s not added!\n") % f)
935 else:
937 else:
936 self.dirstate.forget([f])
938 self.dirstate.forget([f])
937
939
938 def remove(self, list, unlink=False, wlock=None):
940 def remove(self, list, unlink=False, wlock=None):
939 if unlink:
941 if unlink:
940 for f in list:
942 for f in list:
941 try:
943 try:
942 util.unlink(self.wjoin(f))
944 util.unlink(self.wjoin(f))
943 except OSError, inst:
945 except OSError, inst:
944 if inst.errno != errno.ENOENT:
946 if inst.errno != errno.ENOENT:
945 raise
947 raise
946 if not wlock:
948 if not wlock:
947 wlock = self.wlock()
949 wlock = self.wlock()
948 for f in list:
950 for f in list:
949 p = self.wjoin(f)
951 p = self.wjoin(f)
950 if os.path.exists(p):
952 if os.path.exists(p):
951 self.ui.warn(_("%s still exists!\n") % f)
953 self.ui.warn(_("%s still exists!\n") % f)
952 elif self.dirstate.state(f) == 'a':
954 elif self.dirstate.state(f) == 'a':
953 self.dirstate.forget([f])
955 self.dirstate.forget([f])
954 elif f not in self.dirstate:
956 elif f not in self.dirstate:
955 self.ui.warn(_("%s not tracked!\n") % f)
957 self.ui.warn(_("%s not tracked!\n") % f)
956 else:
958 else:
957 self.dirstate.update([f], "r")
959 self.dirstate.update([f], "r")
958
960
959 def undelete(self, list, wlock=None):
961 def undelete(self, list, wlock=None):
960 p = self.dirstate.parents()[0]
962 p = self.dirstate.parents()[0]
961 mn = self.changelog.read(p)[0]
963 mn = self.changelog.read(p)[0]
962 m = self.manifest.read(mn)
964 m = self.manifest.read(mn)
963 if not wlock:
965 if not wlock:
964 wlock = self.wlock()
966 wlock = self.wlock()
965 for f in list:
967 for f in list:
966 if self.dirstate.state(f) not in "r":
968 if self.dirstate.state(f) not in "r":
967 self.ui.warn("%s not removed!\n" % f)
969 self.ui.warn("%s not removed!\n" % f)
968 else:
970 else:
969 t = self.file(f).read(m[f])
971 t = self.file(f).read(m[f])
970 self.wwrite(f, t)
972 self.wwrite(f, t)
971 util.set_exec(self.wjoin(f), m.execf(f))
973 util.set_exec(self.wjoin(f), m.execf(f))
972 self.dirstate.update([f], "n")
974 self.dirstate.update([f], "n")
973
975
974 def copy(self, source, dest, wlock=None):
976 def copy(self, source, dest, wlock=None):
975 p = self.wjoin(dest)
977 p = self.wjoin(dest)
976 if not os.path.exists(p):
978 if not os.path.exists(p):
977 self.ui.warn(_("%s does not exist!\n") % dest)
979 self.ui.warn(_("%s does not exist!\n") % dest)
978 elif not os.path.isfile(p):
980 elif not os.path.isfile(p):
979 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
981 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
980 else:
982 else:
981 if not wlock:
983 if not wlock:
982 wlock = self.wlock()
984 wlock = self.wlock()
983 if self.dirstate.state(dest) == '?':
985 if self.dirstate.state(dest) == '?':
984 self.dirstate.update([dest], "a")
986 self.dirstate.update([dest], "a")
985 self.dirstate.copy(source, dest)
987 self.dirstate.copy(source, dest)
986
988
987 def heads(self, start=None):
989 def heads(self, start=None):
988 heads = self.changelog.heads(start)
990 heads = self.changelog.heads(start)
989 # sort the output in rev descending order
991 # sort the output in rev descending order
990 heads = [(-self.changelog.rev(h), h) for h in heads]
992 heads = [(-self.changelog.rev(h), h) for h in heads]
991 heads.sort()
993 heads.sort()
992 return [n for (r, n) in heads]
994 return [n for (r, n) in heads]
993
995
994 def branches(self, nodes):
996 def branches(self, nodes):
995 if not nodes:
997 if not nodes:
996 nodes = [self.changelog.tip()]
998 nodes = [self.changelog.tip()]
997 b = []
999 b = []
998 for n in nodes:
1000 for n in nodes:
999 t = n
1001 t = n
1000 while 1:
1002 while 1:
1001 p = self.changelog.parents(n)
1003 p = self.changelog.parents(n)
1002 if p[1] != nullid or p[0] == nullid:
1004 if p[1] != nullid or p[0] == nullid:
1003 b.append((t, n, p[0], p[1]))
1005 b.append((t, n, p[0], p[1]))
1004 break
1006 break
1005 n = p[0]
1007 n = p[0]
1006 return b
1008 return b
1007
1009
1008 def between(self, pairs):
1010 def between(self, pairs):
1009 r = []
1011 r = []
1010
1012
1011 for top, bottom in pairs:
1013 for top, bottom in pairs:
1012 n, l, i = top, [], 0
1014 n, l, i = top, [], 0
1013 f = 1
1015 f = 1
1014
1016
1015 while n != bottom:
1017 while n != bottom:
1016 p = self.changelog.parents(n)[0]
1018 p = self.changelog.parents(n)[0]
1017 if i == f:
1019 if i == f:
1018 l.append(n)
1020 l.append(n)
1019 f = f * 2
1021 f = f * 2
1020 n = p
1022 n = p
1021 i += 1
1023 i += 1
1022
1024
1023 r.append(l)
1025 r.append(l)
1024
1026
1025 return r
1027 return r
1026
1028
1027 def findincoming(self, remote, base=None, heads=None, force=False):
1029 def findincoming(self, remote, base=None, heads=None, force=False):
1028 """Return list of roots of the subsets of missing nodes from remote
1030 """Return list of roots of the subsets of missing nodes from remote
1029
1031
1030 If base dict is specified, assume that these nodes and their parents
1032 If base dict is specified, assume that these nodes and their parents
1031 exist on the remote side and that no child of a node of base exists
1033 exist on the remote side and that no child of a node of base exists
1032 in both remote and self.
1034 in both remote and self.
1033 Furthermore base will be updated to include the nodes that exists
1035 Furthermore base will be updated to include the nodes that exists
1034 in self and remote but no children exists in self and remote.
1036 in self and remote but no children exists in self and remote.
1035 If a list of heads is specified, return only nodes which are heads
1037 If a list of heads is specified, return only nodes which are heads
1036 or ancestors of these heads.
1038 or ancestors of these heads.
1037
1039
1038 All the ancestors of base are in self and in remote.
1040 All the ancestors of base are in self and in remote.
1039 All the descendants of the list returned are missing in self.
1041 All the descendants of the list returned are missing in self.
1040 (and so we know that the rest of the nodes are missing in remote, see
1042 (and so we know that the rest of the nodes are missing in remote, see
1041 outgoing)
1043 outgoing)
1042 """
1044 """
1043 m = self.changelog.nodemap
1045 m = self.changelog.nodemap
1044 search = []
1046 search = []
1045 fetch = {}
1047 fetch = {}
1046 seen = {}
1048 seen = {}
1047 seenbranch = {}
1049 seenbranch = {}
1048 if base == None:
1050 if base == None:
1049 base = {}
1051 base = {}
1050
1052
1051 if not heads:
1053 if not heads:
1052 heads = remote.heads()
1054 heads = remote.heads()
1053
1055
1054 if self.changelog.tip() == nullid:
1056 if self.changelog.tip() == nullid:
1055 base[nullid] = 1
1057 base[nullid] = 1
1056 if heads != [nullid]:
1058 if heads != [nullid]:
1057 return [nullid]
1059 return [nullid]
1058 return []
1060 return []
1059
1061
1060 # assume we're closer to the tip than the root
1062 # assume we're closer to the tip than the root
1061 # and start by examining the heads
1063 # and start by examining the heads
1062 self.ui.status(_("searching for changes\n"))
1064 self.ui.status(_("searching for changes\n"))
1063
1065
1064 unknown = []
1066 unknown = []
1065 for h in heads:
1067 for h in heads:
1066 if h not in m:
1068 if h not in m:
1067 unknown.append(h)
1069 unknown.append(h)
1068 else:
1070 else:
1069 base[h] = 1
1071 base[h] = 1
1070
1072
1071 if not unknown:
1073 if not unknown:
1072 return []
1074 return []
1073
1075
1074 req = dict.fromkeys(unknown)
1076 req = dict.fromkeys(unknown)
1075 reqcnt = 0
1077 reqcnt = 0
1076
1078
1077 # search through remote branches
1079 # search through remote branches
1078 # a 'branch' here is a linear segment of history, with four parts:
1080 # a 'branch' here is a linear segment of history, with four parts:
1079 # head, root, first parent, second parent
1081 # head, root, first parent, second parent
1080 # (a branch always has two parents (or none) by definition)
1082 # (a branch always has two parents (or none) by definition)
1081 unknown = remote.branches(unknown)
1083 unknown = remote.branches(unknown)
1082 while unknown:
1084 while unknown:
1083 r = []
1085 r = []
1084 while unknown:
1086 while unknown:
1085 n = unknown.pop(0)
1087 n = unknown.pop(0)
1086 if n[0] in seen:
1088 if n[0] in seen:
1087 continue
1089 continue
1088
1090
1089 self.ui.debug(_("examining %s:%s\n")
1091 self.ui.debug(_("examining %s:%s\n")
1090 % (short(n[0]), short(n[1])))
1092 % (short(n[0]), short(n[1])))
1091 if n[0] == nullid: # found the end of the branch
1093 if n[0] == nullid: # found the end of the branch
1092 pass
1094 pass
1093 elif n in seenbranch:
1095 elif n in seenbranch:
1094 self.ui.debug(_("branch already found\n"))
1096 self.ui.debug(_("branch already found\n"))
1095 continue
1097 continue
1096 elif n[1] and n[1] in m: # do we know the base?
1098 elif n[1] and n[1] in m: # do we know the base?
1097 self.ui.debug(_("found incomplete branch %s:%s\n")
1099 self.ui.debug(_("found incomplete branch %s:%s\n")
1098 % (short(n[0]), short(n[1])))
1100 % (short(n[0]), short(n[1])))
1099 search.append(n) # schedule branch range for scanning
1101 search.append(n) # schedule branch range for scanning
1100 seenbranch[n] = 1
1102 seenbranch[n] = 1
1101 else:
1103 else:
1102 if n[1] not in seen and n[1] not in fetch:
1104 if n[1] not in seen and n[1] not in fetch:
1103 if n[2] in m and n[3] in m:
1105 if n[2] in m and n[3] in m:
1104 self.ui.debug(_("found new changeset %s\n") %
1106 self.ui.debug(_("found new changeset %s\n") %
1105 short(n[1]))
1107 short(n[1]))
1106 fetch[n[1]] = 1 # earliest unknown
1108 fetch[n[1]] = 1 # earliest unknown
1107 for p in n[2:4]:
1109 for p in n[2:4]:
1108 if p in m:
1110 if p in m:
1109 base[p] = 1 # latest known
1111 base[p] = 1 # latest known
1110
1112
1111 for p in n[2:4]:
1113 for p in n[2:4]:
1112 if p not in req and p not in m:
1114 if p not in req and p not in m:
1113 r.append(p)
1115 r.append(p)
1114 req[p] = 1
1116 req[p] = 1
1115 seen[n[0]] = 1
1117 seen[n[0]] = 1
1116
1118
1117 if r:
1119 if r:
1118 reqcnt += 1
1120 reqcnt += 1
1119 self.ui.debug(_("request %d: %s\n") %
1121 self.ui.debug(_("request %d: %s\n") %
1120 (reqcnt, " ".join(map(short, r))))
1122 (reqcnt, " ".join(map(short, r))))
1121 for p in xrange(0, len(r), 10):
1123 for p in xrange(0, len(r), 10):
1122 for b in remote.branches(r[p:p+10]):
1124 for b in remote.branches(r[p:p+10]):
1123 self.ui.debug(_("received %s:%s\n") %
1125 self.ui.debug(_("received %s:%s\n") %
1124 (short(b[0]), short(b[1])))
1126 (short(b[0]), short(b[1])))
1125 unknown.append(b)
1127 unknown.append(b)
1126
1128
1127 # do binary search on the branches we found
1129 # do binary search on the branches we found
1128 while search:
1130 while search:
1129 n = search.pop(0)
1131 n = search.pop(0)
1130 reqcnt += 1
1132 reqcnt += 1
1131 l = remote.between([(n[0], n[1])])[0]
1133 l = remote.between([(n[0], n[1])])[0]
1132 l.append(n[1])
1134 l.append(n[1])
1133 p = n[0]
1135 p = n[0]
1134 f = 1
1136 f = 1
1135 for i in l:
1137 for i in l:
1136 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1138 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1137 if i in m:
1139 if i in m:
1138 if f <= 2:
1140 if f <= 2:
1139 self.ui.debug(_("found new branch changeset %s\n") %
1141 self.ui.debug(_("found new branch changeset %s\n") %
1140 short(p))
1142 short(p))
1141 fetch[p] = 1
1143 fetch[p] = 1
1142 base[i] = 1
1144 base[i] = 1
1143 else:
1145 else:
1144 self.ui.debug(_("narrowed branch search to %s:%s\n")
1146 self.ui.debug(_("narrowed branch search to %s:%s\n")
1145 % (short(p), short(i)))
1147 % (short(p), short(i)))
1146 search.append((p, i))
1148 search.append((p, i))
1147 break
1149 break
1148 p, f = i, f * 2
1150 p, f = i, f * 2
1149
1151
1150 # sanity check our fetch list
1152 # sanity check our fetch list
1151 for f in fetch.keys():
1153 for f in fetch.keys():
1152 if f in m:
1154 if f in m:
1153 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1155 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1154
1156
1155 if base.keys() == [nullid]:
1157 if base.keys() == [nullid]:
1156 if force:
1158 if force:
1157 self.ui.warn(_("warning: repository is unrelated\n"))
1159 self.ui.warn(_("warning: repository is unrelated\n"))
1158 else:
1160 else:
1159 raise util.Abort(_("repository is unrelated"))
1161 raise util.Abort(_("repository is unrelated"))
1160
1162
1161 self.ui.debug(_("found new changesets starting at ") +
1163 self.ui.debug(_("found new changesets starting at ") +
1162 " ".join([short(f) for f in fetch]) + "\n")
1164 " ".join([short(f) for f in fetch]) + "\n")
1163
1165
1164 self.ui.debug(_("%d total queries\n") % reqcnt)
1166 self.ui.debug(_("%d total queries\n") % reqcnt)
1165
1167
1166 return fetch.keys()
1168 return fetch.keys()
1167
1169
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # Fixed: compare against the None singleton with 'is', not '=='.
    if base is None:
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        # a node is a root of the outgoing subset when neither parent
        # is itself outgoing
        if p1 not in remain and p2 not in remain:
            subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1215
1217
def pull(self, remote, heads=None, force=False, lock=None):
    """Fetch missing changesets from remote and add them to this repo.

    Acquires the repository lock when the caller did not supply one,
    and releases it on exit only in that case.  Returns the result of
    addchangegroup, or 0 when there is nothing to fetch.
    """
    ownlock = False
    if not lock:
        lock = self.lock()
        ownlock = True

    try:
        fetch = self.findincoming(remote, force=force)
        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))

        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        elif 'changegroupsubset' not in remote.capabilities:
            # a partial pull needs server-side changegroupsubset support
            raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        if ownlock:
            lock.release()
1241
1243
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to remote, choosing the transport.

    There are two ways to push to a remote repo:

    - addchangegroup assumes the local user can lock the remote
      repo (local filesystem, old ssh servers);
    - unbundle assumes the local user cannot lock the remote repo
      (new ssh servers, http servers).
    """
    if remote.capable('unbundle'):
        push_method = self.push_unbundle
    else:
        push_method = self.push_addchangegroup
    return push_method(remote, force, revs)
1254
1256
def prepush(self, remote, force, revs):
    """Work out what a push would send to remote.

    Returns (changegroup, remote_heads) when there is something to
    push, or (None, status) when the push is empty or would create
    new remote heads without force.
    """
    base = {}
    remote_heads = remote.heads()
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is None:
        bases, heads = update, self.changelog.heads()
    else:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1

    if not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head
        warn = 0

        if remote_heads == [nullid]:
            # pushing into an empty repo can never add a head
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r, heads)
                    # r stays a head only when no outgoing head descends from it
                    if not [h for h in heads if h in desc]:
                        newheads.append(r)
                else:
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1310
1312
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote repo and feeding it a changegroup.

    Returns the result of remote.addchangegroup(), or prepush's
    status code when there is nothing to push.
    """
    lock = remote.lock()
    try:
        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]
    finally:
        # Fixed: the lock was never explicitly released, so an exception
        # (or a long-lived reference) could leave the remote repo locked
        # until garbage collection.  Release it deterministically.
        lock.release()
1319
1321
def push_unbundle(self, remote, force, revs):
    """Push via the unbundle protocol.

    The local repo finds heads on the server and figures out what
    revs it must push.  Once the revs are transferred, if the server
    finds it has different heads (someone else won a commit/push
    race), the server aborts.
    """
    ret = self.prepush(remote, force, revs)
    if ret[0] is None:
        return ret[1]
    cg, remote_heads = ret
    if force:
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1332
1334
def changegroupinfo(self, nodes):
    """Report how many changesets a changegroup will carry; in debug
    mode also list every changeset hash."""
    self.ui.note(_("%d changesets found\n") % len(nodes))
    if not self.ui.debugflag:
        return
    self.ui.debug(_("List of changesets:\n"))
    for n in nodes:
        self.ui.debug("%s\n" % hex(n))
1339
1341
1340 def changegroupsubset(self, bases, heads, source):
1342 def changegroupsubset(self, bases, heads, source):
1341 """This function generates a changegroup consisting of all the nodes
1343 """This function generates a changegroup consisting of all the nodes
1342 that are descendents of any of the bases, and ancestors of any of
1344 that are descendents of any of the bases, and ancestors of any of
1343 the heads.
1345 the heads.
1344
1346
1345 It is fairly complex as determining which filenodes and which
1347 It is fairly complex as determining which filenodes and which
1346 manifest nodes need to be included for the changeset to be complete
1348 manifest nodes need to be included for the changeset to be complete
1347 is non-trivial.
1349 is non-trivial.
1348
1350
1349 Another wrinkle is doing the reverse, figuring out which changeset in
1351 Another wrinkle is doing the reverse, figuring out which changeset in
1350 the changegroup a particular filenode or manifestnode belongs to."""
1352 the changegroup a particular filenode or manifestnode belongs to."""
1351
1353
1352 self.hook('preoutgoing', throw=True, source=source)
1354 self.hook('preoutgoing', throw=True, source=source)
1353
1355
1354 # Set up some initial variables
1356 # Set up some initial variables
1355 # Make it easy to refer to self.changelog
1357 # Make it easy to refer to self.changelog
1356 cl = self.changelog
1358 cl = self.changelog
1357 # msng is short for missing - compute the list of changesets in this
1359 # msng is short for missing - compute the list of changesets in this
1358 # changegroup.
1360 # changegroup.
1359 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1361 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1360 self.changegroupinfo(msng_cl_lst)
1362 self.changegroupinfo(msng_cl_lst)
1361 # Some bases may turn out to be superfluous, and some heads may be
1363 # Some bases may turn out to be superfluous, and some heads may be
1362 # too. nodesbetween will return the minimal set of bases and heads
1364 # too. nodesbetween will return the minimal set of bases and heads
1363 # necessary to re-create the changegroup.
1365 # necessary to re-create the changegroup.
1364
1366
1365 # Known heads are the list of heads that it is assumed the recipient
1367 # Known heads are the list of heads that it is assumed the recipient
1366 # of this changegroup will know about.
1368 # of this changegroup will know about.
1367 knownheads = {}
1369 knownheads = {}
1368 # We assume that all parents of bases are known heads.
1370 # We assume that all parents of bases are known heads.
1369 for n in bases:
1371 for n in bases:
1370 for p in cl.parents(n):
1372 for p in cl.parents(n):
1371 if p != nullid:
1373 if p != nullid:
1372 knownheads[p] = 1
1374 knownheads[p] = 1
1373 knownheads = knownheads.keys()
1375 knownheads = knownheads.keys()
1374 if knownheads:
1376 if knownheads:
1375 # Now that we know what heads are known, we can compute which
1377 # Now that we know what heads are known, we can compute which
1376 # changesets are known. The recipient must know about all
1378 # changesets are known. The recipient must know about all
1377 # changesets required to reach the known heads from the null
1379 # changesets required to reach the known heads from the null
1378 # changeset.
1380 # changeset.
1379 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1381 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1380 junk = None
1382 junk = None
1381 # Transform the list into an ersatz set.
1383 # Transform the list into an ersatz set.
1382 has_cl_set = dict.fromkeys(has_cl_set)
1384 has_cl_set = dict.fromkeys(has_cl_set)
1383 else:
1385 else:
1384 # If there were no known heads, the recipient cannot be assumed to
1386 # If there were no known heads, the recipient cannot be assumed to
1385 # know about any changesets.
1387 # know about any changesets.
1386 has_cl_set = {}
1388 has_cl_set = {}
1387
1389
1388 # Make it easy to refer to self.manifest
1390 # Make it easy to refer to self.manifest
1389 mnfst = self.manifest
1391 mnfst = self.manifest
1390 # We don't know which manifests are missing yet
1392 # We don't know which manifests are missing yet
1391 msng_mnfst_set = {}
1393 msng_mnfst_set = {}
1392 # Nor do we know which filenodes are missing.
1394 # Nor do we know which filenodes are missing.
1393 msng_filenode_set = {}
1395 msng_filenode_set = {}
1394
1396
1395 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1397 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1396 junk = None
1398 junk = None
1397
1399
1398 # A changeset always belongs to itself, so the changenode lookup
1400 # A changeset always belongs to itself, so the changenode lookup
1399 # function for a changenode is identity.
1401 # function for a changenode is identity.
1400 def identity(x):
1402 def identity(x):
1401 return x
1403 return x
1402
1404
1403 # A function generating function. Sets up an environment for the
1405 # A function generating function. Sets up an environment for the
1404 # inner function.
1406 # inner function.
1405 def cmp_by_rev_func(revlog):
1407 def cmp_by_rev_func(revlog):
1406 # Compare two nodes by their revision number in the environment's
1408 # Compare two nodes by their revision number in the environment's
1407 # revision history. Since the revision number both represents the
1409 # revision history. Since the revision number both represents the
1408 # most efficient order to read the nodes in, and represents a
1410 # most efficient order to read the nodes in, and represents a
1409 # topological sorting of the nodes, this function is often useful.
1411 # topological sorting of the nodes, this function is often useful.
1410 def cmp_by_rev(a, b):
1412 def cmp_by_rev(a, b):
1411 return cmp(revlog.rev(a), revlog.rev(b))
1413 return cmp(revlog.rev(a), revlog.rev(b))
1412 return cmp_by_rev
1414 return cmp_by_rev
1413
1415
1414 # If we determine that a particular file or manifest node must be a
1416 # If we determine that a particular file or manifest node must be a
1415 # node that the recipient of the changegroup will already have, we can
1417 # node that the recipient of the changegroup will already have, we can
1416 # also assume the recipient will have all the parents. This function
1418 # also assume the recipient will have all the parents. This function
1417 # prunes them from the set of missing nodes.
1419 # prunes them from the set of missing nodes.
1418 def prune_parents(revlog, hasset, msngset):
1420 def prune_parents(revlog, hasset, msngset):
1419 haslst = hasset.keys()
1421 haslst = hasset.keys()
1420 haslst.sort(cmp_by_rev_func(revlog))
1422 haslst.sort(cmp_by_rev_func(revlog))
1421 for node in haslst:
1423 for node in haslst:
1422 parentlst = [p for p in revlog.parents(node) if p != nullid]
1424 parentlst = [p for p in revlog.parents(node) if p != nullid]
1423 while parentlst:
1425 while parentlst:
1424 n = parentlst.pop()
1426 n = parentlst.pop()
1425 if n not in hasset:
1427 if n not in hasset:
1426 hasset[n] = 1
1428 hasset[n] = 1
1427 p = [p for p in revlog.parents(n) if p != nullid]
1429 p = [p for p in revlog.parents(n) if p != nullid]
1428 parentlst.extend(p)
1430 parentlst.extend(p)
1429 for n in hasset:
1431 for n in hasset:
1430 msngset.pop(n, None)
1432 msngset.pop(n, None)
1431
1433
1432 # This is a function generating function used to set up an environment
1434 # This is a function generating function used to set up an environment
1433 # for the inner function to execute in.
1435 # for the inner function to execute in.
1434 def manifest_and_file_collector(changedfileset):
1436 def manifest_and_file_collector(changedfileset):
1435 # This is an information gathering function that gathers
1437 # This is an information gathering function that gathers
1436 # information from each changeset node that goes out as part of
1438 # information from each changeset node that goes out as part of
1437 # the changegroup. The information gathered is a list of which
1439 # the changegroup. The information gathered is a list of which
1438 # manifest nodes are potentially required (the recipient may
1440 # manifest nodes are potentially required (the recipient may
1439 # already have them) and total list of all files which were
1441 # already have them) and total list of all files which were
1440 # changed in any changeset in the changegroup.
1442 # changed in any changeset in the changegroup.
1441 #
1443 #
1442 # We also remember the first changenode we saw any manifest
1444 # We also remember the first changenode we saw any manifest
1443 # referenced by so we can later determine which changenode 'owns'
1445 # referenced by so we can later determine which changenode 'owns'
1444 # the manifest.
1446 # the manifest.
1445 def collect_manifests_and_files(clnode):
1447 def collect_manifests_and_files(clnode):
1446 c = cl.read(clnode)
1448 c = cl.read(clnode)
1447 for f in c[3]:
1449 for f in c[3]:
1448 # This is to make sure we only have one instance of each
1450 # This is to make sure we only have one instance of each
1449 # filename string for each filename.
1451 # filename string for each filename.
1450 changedfileset.setdefault(f, f)
1452 changedfileset.setdefault(f, f)
1451 msng_mnfst_set.setdefault(c[0], clnode)
1453 msng_mnfst_set.setdefault(c[0], clnode)
1452 return collect_manifests_and_files
1454 return collect_manifests_and_files
1453
1455
1454 # Figure out which manifest nodes (of the ones we think might be part
1456 # Figure out which manifest nodes (of the ones we think might be part
1455 # of the changegroup) the recipient must know about and remove them
1457 # of the changegroup) the recipient must know about and remove them
1456 # from the changegroup.
1458 # from the changegroup.
1457 def prune_manifests():
1459 def prune_manifests():
1458 has_mnfst_set = {}
1460 has_mnfst_set = {}
1459 for n in msng_mnfst_set:
1461 for n in msng_mnfst_set:
1460 # If a 'missing' manifest thinks it belongs to a changenode
1462 # If a 'missing' manifest thinks it belongs to a changenode
1461 # the recipient is assumed to have, obviously the recipient
1463 # the recipient is assumed to have, obviously the recipient
1462 # must have that manifest.
1464 # must have that manifest.
1463 linknode = cl.node(mnfst.linkrev(n))
1465 linknode = cl.node(mnfst.linkrev(n))
1464 if linknode in has_cl_set:
1466 if linknode in has_cl_set:
1465 has_mnfst_set[n] = 1
1467 has_mnfst_set[n] = 1
1466 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1468 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1467
1469
1468 # Use the information collected in collect_manifests_and_files to say
1470 # Use the information collected in collect_manifests_and_files to say
1469 # which changenode any manifestnode belongs to.
1471 # which changenode any manifestnode belongs to.
1470 def lookup_manifest_link(mnfstnode):
1472 def lookup_manifest_link(mnfstnode):
1471 return msng_mnfst_set[mnfstnode]
1473 return msng_mnfst_set[mnfstnode]
1472
1474
1473 # A function generating function that sets up the initial environment
1475 # A function generating function that sets up the initial environment
1474 # the inner function.
1476 # the inner function.
1475 def filenode_collector(changedfiles):
1477 def filenode_collector(changedfiles):
1476 next_rev = [0]
1478 next_rev = [0]
1477 # This gathers information from each manifestnode included in the
1479 # This gathers information from each manifestnode included in the
1478 # changegroup about which filenodes the manifest node references
1480 # changegroup about which filenodes the manifest node references
1479 # so we can include those in the changegroup too.
1481 # so we can include those in the changegroup too.
1480 #
1482 #
1481 # It also remembers which changenode each filenode belongs to. It
1483 # It also remembers which changenode each filenode belongs to. It
1482 # does this by assuming the a filenode belongs to the changenode
1484 # does this by assuming the a filenode belongs to the changenode
1483 # the first manifest that references it belongs to.
1485 # the first manifest that references it belongs to.
1484 def collect_msng_filenodes(mnfstnode):
1486 def collect_msng_filenodes(mnfstnode):
1485 r = mnfst.rev(mnfstnode)
1487 r = mnfst.rev(mnfstnode)
1486 if r == next_rev[0]:
1488 if r == next_rev[0]:
1487 # If the last rev we looked at was the one just previous,
1489 # If the last rev we looked at was the one just previous,
1488 # we only need to see a diff.
1490 # we only need to see a diff.
1489 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1491 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1490 # For each line in the delta
1492 # For each line in the delta
1491 for dline in delta.splitlines():
1493 for dline in delta.splitlines():
1492 # get the filename and filenode for that line
1494 # get the filename and filenode for that line
1493 f, fnode = dline.split('\0')
1495 f, fnode = dline.split('\0')
1494 fnode = bin(fnode[:40])
1496 fnode = bin(fnode[:40])
1495 f = changedfiles.get(f, None)
1497 f = changedfiles.get(f, None)
1496 # And if the file is in the list of files we care
1498 # And if the file is in the list of files we care
1497 # about.
1499 # about.
1498 if f is not None:
1500 if f is not None:
1499 # Get the changenode this manifest belongs to
1501 # Get the changenode this manifest belongs to
1500 clnode = msng_mnfst_set[mnfstnode]
1502 clnode = msng_mnfst_set[mnfstnode]
1501 # Create the set of filenodes for the file if
1503 # Create the set of filenodes for the file if
1502 # there isn't one already.
1504 # there isn't one already.
1503 ndset = msng_filenode_set.setdefault(f, {})
1505 ndset = msng_filenode_set.setdefault(f, {})
1504 # And set the filenode's changelog node to the
1506 # And set the filenode's changelog node to the
1505 # manifest's if it hasn't been set already.
1507 # manifest's if it hasn't been set already.
1506 ndset.setdefault(fnode, clnode)
1508 ndset.setdefault(fnode, clnode)
1507 else:
1509 else:
1508 # Otherwise we need a full manifest.
1510 # Otherwise we need a full manifest.
1509 m = mnfst.read(mnfstnode)
1511 m = mnfst.read(mnfstnode)
1510 # For every file in we care about.
1512 # For every file in we care about.
1511 for f in changedfiles:
1513 for f in changedfiles:
1512 fnode = m.get(f, None)
1514 fnode = m.get(f, None)
1513 # If it's in the manifest
1515 # If it's in the manifest
1514 if fnode is not None:
1516 if fnode is not None:
1515 # See comments above.
1517 # See comments above.
1516 clnode = msng_mnfst_set[mnfstnode]
1518 clnode = msng_mnfst_set[mnfstnode]
1517 ndset = msng_filenode_set.setdefault(f, {})
1519 ndset = msng_filenode_set.setdefault(f, {})
1518 ndset.setdefault(fnode, clnode)
1520 ndset.setdefault(fnode, clnode)
1519 # Remember the revision we hope to see next.
1521 # Remember the revision we hope to see next.
1520 next_rev[0] = r + 1
1522 next_rev[0] = r + 1
1521 return collect_msng_filenodes
1523 return collect_msng_filenodes
1522
1524
1523 # We have a list of filenodes we think we need for a file, lets remove
1525 # We have a list of filenodes we think we need for a file, lets remove
1524 # all those we now the recipient must have.
1526 # all those we now the recipient must have.
1525 def prune_filenodes(f, filerevlog):
1527 def prune_filenodes(f, filerevlog):
1526 msngset = msng_filenode_set[f]
1528 msngset = msng_filenode_set[f]
1527 hasset = {}
1529 hasset = {}
1528 # If a 'missing' filenode thinks it belongs to a changenode we
1530 # If a 'missing' filenode thinks it belongs to a changenode we
1529 # assume the recipient must have, then the recipient must have
1531 # assume the recipient must have, then the recipient must have
1530 # that filenode.
1532 # that filenode.
1531 for n in msngset:
1533 for n in msngset:
1532 clnode = cl.node(filerevlog.linkrev(n))
1534 clnode = cl.node(filerevlog.linkrev(n))
1533 if clnode in has_cl_set:
1535 if clnode in has_cl_set:
1534 hasset[n] = 1
1536 hasset[n] = 1
1535 prune_parents(filerevlog, hasset, msngset)
1537 prune_parents(filerevlog, hasset, msngset)
1536
1538
1537 # A function generator function that sets up the a context for the
1539 # A function generator function that sets up the a context for the
1538 # inner function.
1540 # inner function.
1539 def lookup_filenode_link_func(fname):
1541 def lookup_filenode_link_func(fname):
1540 msngset = msng_filenode_set[fname]
1542 msngset = msng_filenode_set[fname]
1541 # Lookup the changenode the filenode belongs to.
1543 # Lookup the changenode the filenode belongs to.
1542 def lookup_filenode_link(fnode):
1544 def lookup_filenode_link(fnode):
1543 return msngset[fnode]
1545 return msngset[fnode]
1544 return lookup_filenode_link
1546 return lookup_filenode_link
1545
1547
1546 # Now that we have all theses utility functions to help out and
1548 # Now that we have all theses utility functions to help out and
1547 # logically divide up the task, generate the group.
1549 # logically divide up the task, generate the group.
1548 def gengroup():
1550 def gengroup():
1549 # The set of changed files starts empty.
1551 # The set of changed files starts empty.
1550 changedfiles = {}
1552 changedfiles = {}
1551 # Create a changenode group generator that will call our functions
1553 # Create a changenode group generator that will call our functions
1552 # back to lookup the owning changenode and collect information.
1554 # back to lookup the owning changenode and collect information.
1553 group = cl.group(msng_cl_lst, identity,
1555 group = cl.group(msng_cl_lst, identity,
1554 manifest_and_file_collector(changedfiles))
1556 manifest_and_file_collector(changedfiles))
1555 for chnk in group:
1557 for chnk in group:
1556 yield chnk
1558 yield chnk
1557
1559
1558 # The list of manifests has been collected by the generator
1560 # The list of manifests has been collected by the generator
1559 # calling our functions back.
1561 # calling our functions back.
1560 prune_manifests()
1562 prune_manifests()
1561 msng_mnfst_lst = msng_mnfst_set.keys()
1563 msng_mnfst_lst = msng_mnfst_set.keys()
1562 # Sort the manifestnodes by revision number.
1564 # Sort the manifestnodes by revision number.
1563 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1565 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1564 # Create a generator for the manifestnodes that calls our lookup
1566 # Create a generator for the manifestnodes that calls our lookup
1565 # and data collection functions back.
1567 # and data collection functions back.
1566 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1568 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1567 filenode_collector(changedfiles))
1569 filenode_collector(changedfiles))
1568 for chnk in group:
1570 for chnk in group:
1569 yield chnk
1571 yield chnk
1570
1572
1571 # These are no longer needed, dereference and toss the memory for
1573 # These are no longer needed, dereference and toss the memory for
1572 # them.
1574 # them.
1573 msng_mnfst_lst = None
1575 msng_mnfst_lst = None
1574 msng_mnfst_set.clear()
1576 msng_mnfst_set.clear()
1575
1577
1576 changedfiles = changedfiles.keys()
1578 changedfiles = changedfiles.keys()
1577 changedfiles.sort()
1579 changedfiles.sort()
1578 # Go through all our files in order sorted by name.
1580 # Go through all our files in order sorted by name.
1579 for fname in changedfiles:
1581 for fname in changedfiles:
1580 filerevlog = self.file(fname)
1582 filerevlog = self.file(fname)
1581 # Toss out the filenodes that the recipient isn't really
1583 # Toss out the filenodes that the recipient isn't really
1582 # missing.
1584 # missing.
1583 if msng_filenode_set.has_key(fname):
1585 if msng_filenode_set.has_key(fname):
1584 prune_filenodes(fname, filerevlog)
1586 prune_filenodes(fname, filerevlog)
1585 msng_filenode_lst = msng_filenode_set[fname].keys()
1587 msng_filenode_lst = msng_filenode_set[fname].keys()
1586 else:
1588 else:
1587 msng_filenode_lst = []
1589 msng_filenode_lst = []
1588 # If any filenodes are left, generate the group for them,
1590 # If any filenodes are left, generate the group for them,
1589 # otherwise don't bother.
1591 # otherwise don't bother.
1590 if len(msng_filenode_lst) > 0:
1592 if len(msng_filenode_lst) > 0:
1591 yield changegroup.genchunk(fname)
1593 yield changegroup.genchunk(fname)
1592 # Sort the filenodes by their revision #
1594 # Sort the filenodes by their revision #
1593 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1595 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1594 # Create a group generator and only pass in a changenode
1596 # Create a group generator and only pass in a changenode
1595 # lookup function as we need to collect no information
1597 # lookup function as we need to collect no information
1596 # from filenodes.
1598 # from filenodes.
1597 group = filerevlog.group(msng_filenode_lst,
1599 group = filerevlog.group(msng_filenode_lst,
1598 lookup_filenode_link_func(fname))
1600 lookup_filenode_link_func(fname))
1599 for chnk in group:
1601 for chnk in group:
1600 yield chnk
1602 yield chnk
1601 if msng_filenode_set.has_key(fname):
1603 if msng_filenode_set.has_key(fname):
1602 # Don't need this anymore, toss it to free memory.
1604 # Don't need this anymore, toss it to free memory.
1603 del msng_filenode_set[fname]
1605 del msng_filenode_set[fname]
1604 # Signal that no more groups are left.
1606 # Signal that no more groups are left.
1605 yield changegroup.closechunk()
1607 yield changegroup.closechunk()
1606
1608
1607 if msng_cl_lst:
1609 if msng_cl_lst:
1608 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1610 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1609
1611
1610 return util.chunkbuffer(gengroup())
1612 return util.chunkbuffer(gengroup())
1611
1613
1612 def changegroup(self, basenodes, source):
1614 def changegroup(self, basenodes, source):
1613 """Generate a changegroup of all nodes that we have that a recipient
1615 """Generate a changegroup of all nodes that we have that a recipient
1614 doesn't.
1616 doesn't.
1615
1617
1616 This is much easier than the previous function as we can assume that
1618 This is much easier than the previous function as we can assume that
1617 the recipient has any changenode we aren't sending them."""
1619 the recipient has any changenode we aren't sending them."""
1618
1620
1619 self.hook('preoutgoing', throw=True, source=source)
1621 self.hook('preoutgoing', throw=True, source=source)
1620
1622
1621 cl = self.changelog
1623 cl = self.changelog
1622 nodes = cl.nodesbetween(basenodes, None)[0]
1624 nodes = cl.nodesbetween(basenodes, None)[0]
1623 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1625 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1624 self.changegroupinfo(nodes)
1626 self.changegroupinfo(nodes)
1625
1627
1626 def identity(x):
1628 def identity(x):
1627 return x
1629 return x
1628
1630
1629 def gennodelst(revlog):
1631 def gennodelst(revlog):
1630 for r in xrange(0, revlog.count()):
1632 for r in xrange(0, revlog.count()):
1631 n = revlog.node(r)
1633 n = revlog.node(r)
1632 if revlog.linkrev(n) in revset:
1634 if revlog.linkrev(n) in revset:
1633 yield n
1635 yield n
1634
1636
1635 def changed_file_collector(changedfileset):
1637 def changed_file_collector(changedfileset):
1636 def collect_changed_files(clnode):
1638 def collect_changed_files(clnode):
1637 c = cl.read(clnode)
1639 c = cl.read(clnode)
1638 for fname in c[3]:
1640 for fname in c[3]:
1639 changedfileset[fname] = 1
1641 changedfileset[fname] = 1
1640 return collect_changed_files
1642 return collect_changed_files
1641
1643
1642 def lookuprevlink_func(revlog):
1644 def lookuprevlink_func(revlog):
1643 def lookuprevlink(n):
1645 def lookuprevlink(n):
1644 return cl.node(revlog.linkrev(n))
1646 return cl.node(revlog.linkrev(n))
1645 return lookuprevlink
1647 return lookuprevlink
1646
1648
1647 def gengroup():
1649 def gengroup():
1648 # construct a list of all changed files
1650 # construct a list of all changed files
1649 changedfiles = {}
1651 changedfiles = {}
1650
1652
1651 for chnk in cl.group(nodes, identity,
1653 for chnk in cl.group(nodes, identity,
1652 changed_file_collector(changedfiles)):
1654 changed_file_collector(changedfiles)):
1653 yield chnk
1655 yield chnk
1654 changedfiles = changedfiles.keys()
1656 changedfiles = changedfiles.keys()
1655 changedfiles.sort()
1657 changedfiles.sort()
1656
1658
1657 mnfst = self.manifest
1659 mnfst = self.manifest
1658 nodeiter = gennodelst(mnfst)
1660 nodeiter = gennodelst(mnfst)
1659 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1661 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1660 yield chnk
1662 yield chnk
1661
1663
1662 for fname in changedfiles:
1664 for fname in changedfiles:
1663 filerevlog = self.file(fname)
1665 filerevlog = self.file(fname)
1664 nodeiter = gennodelst(filerevlog)
1666 nodeiter = gennodelst(filerevlog)
1665 nodeiter = list(nodeiter)
1667 nodeiter = list(nodeiter)
1666 if nodeiter:
1668 if nodeiter:
1667 yield changegroup.genchunk(fname)
1669 yield changegroup.genchunk(fname)
1668 lookup = lookuprevlink_func(filerevlog)
1670 lookup = lookuprevlink_func(filerevlog)
1669 for chnk in filerevlog.group(nodeiter, lookup):
1671 for chnk in filerevlog.group(nodeiter, lookup):
1670 yield chnk
1672 yield chnk
1671
1673
1672 yield changegroup.closechunk()
1674 yield changegroup.closechunk()
1673
1675
1674 if nodes:
1676 if nodes:
1675 self.hook('outgoing', node=hex(nodes[0]), source=source)
1677 self.hook('outgoing', node=hex(nodes[0]), source=source)
1676
1678
1677 return util.chunkbuffer(gengroup())
1679 return util.chunkbuffer(gengroup())
1678
1680
1679 def addchangegroup(self, source, srctype, url):
1681 def addchangegroup(self, source, srctype, url):
1680 """add changegroup to repo.
1682 """add changegroup to repo.
1681
1683
1682 return values:
1684 return values:
1683 - nothing changed or no source: 0
1685 - nothing changed or no source: 0
1684 - more heads than before: 1+added heads (2..n)
1686 - more heads than before: 1+added heads (2..n)
1685 - less heads than before: -1-removed heads (-2..-n)
1687 - less heads than before: -1-removed heads (-2..-n)
1686 - number of heads stays the same: 1
1688 - number of heads stays the same: 1
1687 """
1689 """
1688 def csmap(x):
1690 def csmap(x):
1689 self.ui.debug(_("add changeset %s\n") % short(x))
1691 self.ui.debug(_("add changeset %s\n") % short(x))
1690 return cl.count()
1692 return cl.count()
1691
1693
1692 def revmap(x):
1694 def revmap(x):
1693 return cl.rev(x)
1695 return cl.rev(x)
1694
1696
1695 if not source:
1697 if not source:
1696 return 0
1698 return 0
1697
1699
1698 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1700 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1699
1701
1700 changesets = files = revisions = 0
1702 changesets = files = revisions = 0
1701
1703
1702 tr = self.transaction()
1704 tr = self.transaction()
1703
1705
1704 # write changelog data to temp files so concurrent readers will not see
1706 # write changelog data to temp files so concurrent readers will not see
1705 # inconsistent view
1707 # inconsistent view
1706 cl = None
1708 cl = None
1707 try:
1709 try:
1708 cl = appendfile.appendchangelog(self.sopener,
1710 cl = appendfile.appendchangelog(self.sopener,
1709 self.changelog.version)
1711 self.changelog.version)
1710
1712
1711 oldheads = len(cl.heads())
1713 oldheads = len(cl.heads())
1712
1714
1713 # pull off the changeset group
1715 # pull off the changeset group
1714 self.ui.status(_("adding changesets\n"))
1716 self.ui.status(_("adding changesets\n"))
1715 cor = cl.count() - 1
1717 cor = cl.count() - 1
1716 chunkiter = changegroup.chunkiter(source)
1718 chunkiter = changegroup.chunkiter(source)
1717 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1719 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1718 raise util.Abort(_("received changelog group is empty"))
1720 raise util.Abort(_("received changelog group is empty"))
1719 cnr = cl.count() - 1
1721 cnr = cl.count() - 1
1720 changesets = cnr - cor
1722 changesets = cnr - cor
1721
1723
1722 # pull off the manifest group
1724 # pull off the manifest group
1723 self.ui.status(_("adding manifests\n"))
1725 self.ui.status(_("adding manifests\n"))
1724 chunkiter = changegroup.chunkiter(source)
1726 chunkiter = changegroup.chunkiter(source)
1725 # no need to check for empty manifest group here:
1727 # no need to check for empty manifest group here:
1726 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1728 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1727 # no new manifest will be created and the manifest group will
1729 # no new manifest will be created and the manifest group will
1728 # be empty during the pull
1730 # be empty during the pull
1729 self.manifest.addgroup(chunkiter, revmap, tr)
1731 self.manifest.addgroup(chunkiter, revmap, tr)
1730
1732
1731 # process the files
1733 # process the files
1732 self.ui.status(_("adding file changes\n"))
1734 self.ui.status(_("adding file changes\n"))
1733 while 1:
1735 while 1:
1734 f = changegroup.getchunk(source)
1736 f = changegroup.getchunk(source)
1735 if not f:
1737 if not f:
1736 break
1738 break
1737 self.ui.debug(_("adding %s revisions\n") % f)
1739 self.ui.debug(_("adding %s revisions\n") % f)
1738 fl = self.file(f)
1740 fl = self.file(f)
1739 o = fl.count()
1741 o = fl.count()
1740 chunkiter = changegroup.chunkiter(source)
1742 chunkiter = changegroup.chunkiter(source)
1741 if fl.addgroup(chunkiter, revmap, tr) is None:
1743 if fl.addgroup(chunkiter, revmap, tr) is None:
1742 raise util.Abort(_("received file revlog group is empty"))
1744 raise util.Abort(_("received file revlog group is empty"))
1743 revisions += fl.count() - o
1745 revisions += fl.count() - o
1744 files += 1
1746 files += 1
1745
1747
1746 cl.writedata()
1748 cl.writedata()
1747 finally:
1749 finally:
1748 if cl:
1750 if cl:
1749 cl.cleanup()
1751 cl.cleanup()
1750
1752
1751 # make changelog see real files again
1753 # make changelog see real files again
1752 self.changelog = changelog.changelog(self.sopener,
1754 self.changelog = changelog.changelog(self.sopener,
1753 self.changelog.version)
1755 self.changelog.version)
1754 self.changelog.checkinlinesize(tr)
1756 self.changelog.checkinlinesize(tr)
1755
1757
1756 newheads = len(self.changelog.heads())
1758 newheads = len(self.changelog.heads())
1757 heads = ""
1759 heads = ""
1758 if oldheads and newheads != oldheads:
1760 if oldheads and newheads != oldheads:
1759 heads = _(" (%+d heads)") % (newheads - oldheads)
1761 heads = _(" (%+d heads)") % (newheads - oldheads)
1760
1762
1761 self.ui.status(_("added %d changesets"
1763 self.ui.status(_("added %d changesets"
1762 " with %d changes to %d files%s\n")
1764 " with %d changes to %d files%s\n")
1763 % (changesets, revisions, files, heads))
1765 % (changesets, revisions, files, heads))
1764
1766
1765 if changesets > 0:
1767 if changesets > 0:
1766 self.hook('pretxnchangegroup', throw=True,
1768 self.hook('pretxnchangegroup', throw=True,
1767 node=hex(self.changelog.node(cor+1)), source=srctype,
1769 node=hex(self.changelog.node(cor+1)), source=srctype,
1768 url=url)
1770 url=url)
1769
1771
1770 tr.close()
1772 tr.close()
1771
1773
1772 if changesets > 0:
1774 if changesets > 0:
1773 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1775 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1774 source=srctype, url=url)
1776 source=srctype, url=url)
1775
1777
1776 for i in xrange(cor + 1, cnr + 1):
1778 for i in xrange(cor + 1, cnr + 1):
1777 self.hook("incoming", node=hex(self.changelog.node(i)),
1779 self.hook("incoming", node=hex(self.changelog.node(i)),
1778 source=srctype, url=url)
1780 source=srctype, url=url)
1779
1781
1780 # never return 0 here:
1782 # never return 0 here:
1781 if newheads < oldheads:
1783 if newheads < oldheads:
1782 return newheads - oldheads - 1
1784 return newheads - oldheads - 1
1783 else:
1785 else:
1784 return newheads - oldheads + 1
1786 return newheads - oldheads + 1
1785
1787
1786
1788
1787 def stream_in(self, remote):
1789 def stream_in(self, remote):
1788 fp = remote.stream_out()
1790 fp = remote.stream_out()
1789 l = fp.readline()
1791 l = fp.readline()
1790 try:
1792 try:
1791 resp = int(l)
1793 resp = int(l)
1792 except ValueError:
1794 except ValueError:
1793 raise util.UnexpectedOutput(
1795 raise util.UnexpectedOutput(
1794 _('Unexpected response from remote server:'), l)
1796 _('Unexpected response from remote server:'), l)
1795 if resp == 1:
1797 if resp == 1:
1796 raise util.Abort(_('operation forbidden by server'))
1798 raise util.Abort(_('operation forbidden by server'))
1797 elif resp == 2:
1799 elif resp == 2:
1798 raise util.Abort(_('locking the remote repository failed'))
1800 raise util.Abort(_('locking the remote repository failed'))
1799 elif resp != 0:
1801 elif resp != 0:
1800 raise util.Abort(_('the server sent an unknown error code'))
1802 raise util.Abort(_('the server sent an unknown error code'))
1801 self.ui.status(_('streaming all changes\n'))
1803 self.ui.status(_('streaming all changes\n'))
1802 l = fp.readline()
1804 l = fp.readline()
1803 try:
1805 try:
1804 total_files, total_bytes = map(int, l.split(' ', 1))
1806 total_files, total_bytes = map(int, l.split(' ', 1))
1805 except ValueError, TypeError:
1807 except ValueError, TypeError:
1806 raise util.UnexpectedOutput(
1808 raise util.UnexpectedOutput(
1807 _('Unexpected response from remote server:'), l)
1809 _('Unexpected response from remote server:'), l)
1808 self.ui.status(_('%d files to transfer, %s of data\n') %
1810 self.ui.status(_('%d files to transfer, %s of data\n') %
1809 (total_files, util.bytecount(total_bytes)))
1811 (total_files, util.bytecount(total_bytes)))
1810 start = time.time()
1812 start = time.time()
1811 for i in xrange(total_files):
1813 for i in xrange(total_files):
1812 # XXX doesn't support '\n' or '\r' in filenames
1814 # XXX doesn't support '\n' or '\r' in filenames
1813 l = fp.readline()
1815 l = fp.readline()
1814 try:
1816 try:
1815 name, size = l.split('\0', 1)
1817 name, size = l.split('\0', 1)
1816 size = int(size)
1818 size = int(size)
1817 except ValueError, TypeError:
1819 except ValueError, TypeError:
1818 raise util.UnexpectedOutput(
1820 raise util.UnexpectedOutput(
1819 _('Unexpected response from remote server:'), l)
1821 _('Unexpected response from remote server:'), l)
1820 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1822 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1821 ofp = self.sopener(name, 'w')
1823 ofp = self.sopener(name, 'w')
1822 for chunk in util.filechunkiter(fp, limit=size):
1824 for chunk in util.filechunkiter(fp, limit=size):
1823 ofp.write(chunk)
1825 ofp.write(chunk)
1824 ofp.close()
1826 ofp.close()
1825 elapsed = time.time() - start
1827 elapsed = time.time() - start
1826 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1828 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1827 (util.bytecount(total_bytes), elapsed,
1829 (util.bytecount(total_bytes), elapsed,
1828 util.bytecount(total_bytes / elapsed)))
1830 util.bytecount(total_bytes / elapsed)))
1829 self.reload()
1831 self.reload()
1830 return len(self.heads()) + 1
1832 return len(self.heads()) + 1
1831
1833
1832 def clone(self, remote, heads=[], stream=False):
1834 def clone(self, remote, heads=[], stream=False):
1833 '''clone remote repository.
1835 '''clone remote repository.
1834
1836
1835 keyword arguments:
1837 keyword arguments:
1836 heads: list of revs to clone (forces use of pull)
1838 heads: list of revs to clone (forces use of pull)
1837 stream: use streaming clone if possible'''
1839 stream: use streaming clone if possible'''
1838
1840
1839 # now, all clients that can request uncompressed clones can
1841 # now, all clients that can request uncompressed clones can
1840 # read repo formats supported by all servers that can serve
1842 # read repo formats supported by all servers that can serve
1841 # them.
1843 # them.
1842
1844
1843 # if revlog format changes, client will have to check version
1845 # if revlog format changes, client will have to check version
1844 # and format flags on "stream" capability, and use
1846 # and format flags on "stream" capability, and use
1845 # uncompressed only if compatible.
1847 # uncompressed only if compatible.
1846
1848
1847 if stream and not heads and remote.capable('stream'):
1849 if stream and not heads and remote.capable('stream'):
1848 return self.stream_in(remote)
1850 return self.stream_in(remote)
1849 return self.pull(remote, heads)
1851 return self.pull(remote, heads)
1850
1852
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into plain tuples eagerly so the returned closure
    keeps no reference to the caller's sequence (avoids the circular
    references that would keep destructors from running).
    """
    renamefiles = [tuple(t) for t in files]

    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
1858
1860
def instance(ui, path, create):
    """Module entry point used by repo.repository: open (or create) the
    local repository at *path*, stripping any leading file:// scheme."""
    return localrepository(ui, util.drop_scheme('file', path), create)
1861
1863
def islocal(path):
    """Every path handled by this module is local by definition."""
    return True
# (Review-page footer residue from the HTML scrape — "General Comments",
# login prompt — converted to a comment; not part of the module.)