##// END OF EJS Templates
don't use readline() to read branches.cache...
Alexis S. L. Carvalho -
r3668:6f669696 default
parent child Browse files
Show More
@@ -1,1866 +1,1868 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
40 os.mkdir(self.join("data"))
41 else:
41 else:
42 raise repo.RepoError(_("repository %s not found") % path)
42 raise repo.RepoError(_("repository %s not found") % path)
43 elif create:
43 elif create:
44 raise repo.RepoError(_("repository %s already exists") % path)
44 raise repo.RepoError(_("repository %s already exists") % path)
45
45
46 self.root = os.path.realpath(path)
46 self.root = os.path.realpath(path)
47 self.origroot = path
47 self.origroot = path
48 self.ui = ui.ui(parentui=parentui)
48 self.ui = ui.ui(parentui=parentui)
49 self.opener = util.opener(self.path)
49 self.opener = util.opener(self.path)
50 self.sopener = util.opener(self.path)
50 self.sopener = util.opener(self.path)
51 self.wopener = util.opener(self.root)
51 self.wopener = util.opener(self.root)
52
52
53 try:
53 try:
54 self.ui.readconfig(self.join("hgrc"), self.root)
54 self.ui.readconfig(self.join("hgrc"), self.root)
55 except IOError:
55 except IOError:
56 pass
56 pass
57
57
58 v = self.ui.configrevlog()
58 v = self.ui.configrevlog()
59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
61 fl = v.get('flags', None)
61 fl = v.get('flags', None)
62 flags = 0
62 flags = 0
63 if fl != None:
63 if fl != None:
64 for x in fl.split():
64 for x in fl.split():
65 flags |= revlog.flagstr(x)
65 flags |= revlog.flagstr(x)
66 elif self.revlogv1:
66 elif self.revlogv1:
67 flags = revlog.REVLOG_DEFAULT_FLAGS
67 flags = revlog.REVLOG_DEFAULT_FLAGS
68
68
69 v = self.revlogversion | flags
69 v = self.revlogversion | flags
70 self.manifest = manifest.manifest(self.sopener, v)
70 self.manifest = manifest.manifest(self.sopener, v)
71 self.changelog = changelog.changelog(self.sopener, v)
71 self.changelog = changelog.changelog(self.sopener, v)
72
72
73 # the changelog might not have the inline index flag
73 # the changelog might not have the inline index flag
74 # on. If the format of the changelog is the same as found in
74 # on. If the format of the changelog is the same as found in
75 # .hgrc, apply any flags found in the .hgrc as well.
75 # .hgrc, apply any flags found in the .hgrc as well.
76 # Otherwise, just version from the changelog
76 # Otherwise, just version from the changelog
77 v = self.changelog.version
77 v = self.changelog.version
78 if v == self.revlogversion:
78 if v == self.revlogversion:
79 v |= flags
79 v |= flags
80 self.revlogversion = v
80 self.revlogversion = v
81
81
82 self.tagscache = None
82 self.tagscache = None
83 self.branchcache = None
83 self.branchcache = None
84 self.nodetagscache = None
84 self.nodetagscache = None
85 self.encodepats = None
85 self.encodepats = None
86 self.decodepats = None
86 self.decodepats = None
87 self.transhandle = None
87 self.transhandle = None
88
88
89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
90
90
91 def url(self):
91 def url(self):
92 return 'file:' + self.root
92 return 'file:' + self.root
93
93
94 def hook(self, name, throw=False, **args):
94 def hook(self, name, throw=False, **args):
95 def callhook(hname, funcname):
95 def callhook(hname, funcname):
96 '''call python hook. hook is callable object, looked up as
96 '''call python hook. hook is callable object, looked up as
97 name in python module. if callable returns "true", hook
97 name in python module. if callable returns "true", hook
98 fails, else passes. if hook raises exception, treated as
98 fails, else passes. if hook raises exception, treated as
99 hook failure. exception propagates if throw is "true".
99 hook failure. exception propagates if throw is "true".
100
100
101 reason for "true" meaning "hook failed" is so that
101 reason for "true" meaning "hook failed" is so that
102 unmodified commands (e.g. mercurial.commands.update) can
102 unmodified commands (e.g. mercurial.commands.update) can
103 be run as hooks without wrappers to convert return values.'''
103 be run as hooks without wrappers to convert return values.'''
104
104
105 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
106 d = funcname.rfind('.')
106 d = funcname.rfind('.')
107 if d == -1:
107 if d == -1:
108 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
109 % (hname, funcname))
109 % (hname, funcname))
110 modname = funcname[:d]
110 modname = funcname[:d]
111 try:
111 try:
112 obj = __import__(modname)
112 obj = __import__(modname)
113 except ImportError:
113 except ImportError:
114 try:
114 try:
115 # extensions are loaded with hgext_ prefix
115 # extensions are loaded with hgext_ prefix
116 obj = __import__("hgext_%s" % modname)
116 obj = __import__("hgext_%s" % modname)
117 except ImportError:
117 except ImportError:
118 raise util.Abort(_('%s hook is invalid '
118 raise util.Abort(_('%s hook is invalid '
119 '(import of "%s" failed)') %
119 '(import of "%s" failed)') %
120 (hname, modname))
120 (hname, modname))
121 try:
121 try:
122 for p in funcname.split('.')[1:]:
122 for p in funcname.split('.')[1:]:
123 obj = getattr(obj, p)
123 obj = getattr(obj, p)
124 except AttributeError, err:
124 except AttributeError, err:
125 raise util.Abort(_('%s hook is invalid '
125 raise util.Abort(_('%s hook is invalid '
126 '("%s" is not defined)') %
126 '("%s" is not defined)') %
127 (hname, funcname))
127 (hname, funcname))
128 if not callable(obj):
128 if not callable(obj):
129 raise util.Abort(_('%s hook is invalid '
129 raise util.Abort(_('%s hook is invalid '
130 '("%s" is not callable)') %
130 '("%s" is not callable)') %
131 (hname, funcname))
131 (hname, funcname))
132 try:
132 try:
133 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
134 except (KeyboardInterrupt, util.SignalInterrupt):
134 except (KeyboardInterrupt, util.SignalInterrupt):
135 raise
135 raise
136 except Exception, exc:
136 except Exception, exc:
137 if isinstance(exc, util.Abort):
137 if isinstance(exc, util.Abort):
138 self.ui.warn(_('error: %s hook failed: %s\n') %
138 self.ui.warn(_('error: %s hook failed: %s\n') %
139 (hname, exc.args[0]))
139 (hname, exc.args[0]))
140 else:
140 else:
141 self.ui.warn(_('error: %s hook raised an exception: '
141 self.ui.warn(_('error: %s hook raised an exception: '
142 '%s\n') % (hname, exc))
142 '%s\n') % (hname, exc))
143 if throw:
143 if throw:
144 raise
144 raise
145 self.ui.print_exc()
145 self.ui.print_exc()
146 return True
146 return True
147 if r:
147 if r:
148 if throw:
148 if throw:
149 raise util.Abort(_('%s hook failed') % hname)
149 raise util.Abort(_('%s hook failed') % hname)
150 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 self.ui.warn(_('warning: %s hook failed\n') % hname)
151 return r
151 return r
152
152
153 def runhook(name, cmd):
153 def runhook(name, cmd):
154 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
155 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
156 r = util.system(cmd, environ=env, cwd=self.root)
156 r = util.system(cmd, environ=env, cwd=self.root)
157 if r:
157 if r:
158 desc, r = util.explain_exit(r)
158 desc, r = util.explain_exit(r)
159 if throw:
159 if throw:
160 raise util.Abort(_('%s hook %s') % (name, desc))
160 raise util.Abort(_('%s hook %s') % (name, desc))
161 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
162 return r
162 return r
163
163
164 r = False
164 r = False
165 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
166 if hname.split(".", 1)[0] == name and cmd]
166 if hname.split(".", 1)[0] == name and cmd]
167 hooks.sort()
167 hooks.sort()
168 for hname, cmd in hooks:
168 for hname, cmd in hooks:
169 if cmd.startswith('python:'):
169 if cmd.startswith('python:'):
170 r = callhook(hname, cmd[7:].strip()) or r
170 r = callhook(hname, cmd[7:].strip()) or r
171 else:
171 else:
172 r = runhook(hname, cmd) or r
172 r = runhook(hname, cmd) or r
173 return r
173 return r
174
174
175 tag_disallowed = ':\r\n'
175 tag_disallowed = ':\r\n'
176
176
177 def tag(self, name, node, message, local, user, date):
177 def tag(self, name, node, message, local, user, date):
178 '''tag a revision with a symbolic name.
178 '''tag a revision with a symbolic name.
179
179
180 if local is True, the tag is stored in a per-repository file.
180 if local is True, the tag is stored in a per-repository file.
181 otherwise, it is stored in the .hgtags file, and a new
181 otherwise, it is stored in the .hgtags file, and a new
182 changeset is committed with the change.
182 changeset is committed with the change.
183
183
184 keyword arguments:
184 keyword arguments:
185
185
186 local: whether to store tag in non-version-controlled file
186 local: whether to store tag in non-version-controlled file
187 (default False)
187 (default False)
188
188
189 message: commit message to use if committing
189 message: commit message to use if committing
190
190
191 user: name of user to use if committing
191 user: name of user to use if committing
192
192
193 date: date tuple to use if committing'''
193 date: date tuple to use if committing'''
194
194
195 for c in self.tag_disallowed:
195 for c in self.tag_disallowed:
196 if c in name:
196 if c in name:
197 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 raise util.Abort(_('%r cannot be used in a tag name') % c)
198
198
199 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
200
200
201 if local:
201 if local:
202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 self.hook('tag', node=hex(node), tag=name, local=local)
203 self.hook('tag', node=hex(node), tag=name, local=local)
204 return
204 return
205
205
206 for x in self.status()[:5]:
206 for x in self.status()[:5]:
207 if '.hgtags' in x:
207 if '.hgtags' in x:
208 raise util.Abort(_('working copy of .hgtags is changed '
208 raise util.Abort(_('working copy of .hgtags is changed '
209 '(please commit .hgtags manually)'))
209 '(please commit .hgtags manually)'))
210
210
211 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
212 if self.dirstate.state('.hgtags') == '?':
212 if self.dirstate.state('.hgtags') == '?':
213 self.add(['.hgtags'])
213 self.add(['.hgtags'])
214
214
215 self.commit(['.hgtags'], message, user, date)
215 self.commit(['.hgtags'], message, user, date)
216 self.hook('tag', node=hex(node), tag=name, local=local)
216 self.hook('tag', node=hex(node), tag=name, local=local)
217
217
218 def tags(self):
218 def tags(self):
219 '''return a mapping of tag to node'''
219 '''return a mapping of tag to node'''
220 if not self.tagscache:
220 if not self.tagscache:
221 self.tagscache = {}
221 self.tagscache = {}
222
222
223 def parsetag(line, context):
223 def parsetag(line, context):
224 if not line:
224 if not line:
225 return
225 return
226 s = l.split(" ", 1)
226 s = l.split(" ", 1)
227 if len(s) != 2:
227 if len(s) != 2:
228 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 self.ui.warn(_("%s: cannot parse entry\n") % context)
229 return
229 return
230 node, key = s
230 node, key = s
231 key = key.strip()
231 key = key.strip()
232 try:
232 try:
233 bin_n = bin(node)
233 bin_n = bin(node)
234 except TypeError:
234 except TypeError:
235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
236 (context, node))
236 (context, node))
237 return
237 return
238 if bin_n not in self.changelog.nodemap:
238 if bin_n not in self.changelog.nodemap:
239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
240 (context, key))
240 (context, key))
241 return
241 return
242 self.tagscache[key] = bin_n
242 self.tagscache[key] = bin_n
243
243
244 # read the tags file from each head, ending with the tip,
244 # read the tags file from each head, ending with the tip,
245 # and add each tag found to the map, with "newer" ones
245 # and add each tag found to the map, with "newer" ones
246 # taking precedence
246 # taking precedence
247 f = None
247 f = None
248 for rev, node, fnode in self._hgtagsnodes():
248 for rev, node, fnode in self._hgtagsnodes():
249 f = (f and f.filectx(fnode) or
249 f = (f and f.filectx(fnode) or
250 self.filectx('.hgtags', fileid=fnode))
250 self.filectx('.hgtags', fileid=fnode))
251 count = 0
251 count = 0
252 for l in f.data().splitlines():
252 for l in f.data().splitlines():
253 count += 1
253 count += 1
254 parsetag(l, _("%s, line %d") % (str(f), count))
254 parsetag(l, _("%s, line %d") % (str(f), count))
255
255
256 try:
256 try:
257 f = self.opener("localtags")
257 f = self.opener("localtags")
258 count = 0
258 count = 0
259 for l in f:
259 for l in f:
260 count += 1
260 count += 1
261 parsetag(l, _("localtags, line %d") % count)
261 parsetag(l, _("localtags, line %d") % count)
262 except IOError:
262 except IOError:
263 pass
263 pass
264
264
265 self.tagscache['tip'] = self.changelog.tip()
265 self.tagscache['tip'] = self.changelog.tip()
266
266
267 return self.tagscache
267 return self.tagscache
268
268
269 def _hgtagsnodes(self):
269 def _hgtagsnodes(self):
270 heads = self.heads()
270 heads = self.heads()
271 heads.reverse()
271 heads.reverse()
272 last = {}
272 last = {}
273 ret = []
273 ret = []
274 for node in heads:
274 for node in heads:
275 c = self.changectx(node)
275 c = self.changectx(node)
276 rev = c.rev()
276 rev = c.rev()
277 try:
277 try:
278 fnode = c.filenode('.hgtags')
278 fnode = c.filenode('.hgtags')
279 except repo.LookupError:
279 except repo.LookupError:
280 continue
280 continue
281 ret.append((rev, node, fnode))
281 ret.append((rev, node, fnode))
282 if fnode in last:
282 if fnode in last:
283 ret[last[fnode]] = None
283 ret[last[fnode]] = None
284 last[fnode] = len(ret) - 1
284 last[fnode] = len(ret) - 1
285 return [item for item in ret if item]
285 return [item for item in ret if item]
286
286
287 def tagslist(self):
287 def tagslist(self):
288 '''return a list of tags ordered by revision'''
288 '''return a list of tags ordered by revision'''
289 l = []
289 l = []
290 for t, n in self.tags().items():
290 for t, n in self.tags().items():
291 try:
291 try:
292 r = self.changelog.rev(n)
292 r = self.changelog.rev(n)
293 except:
293 except:
294 r = -2 # sort to the beginning of the list if unknown
294 r = -2 # sort to the beginning of the list if unknown
295 l.append((r, t, n))
295 l.append((r, t, n))
296 l.sort()
296 l.sort()
297 return [(t, n) for r, t, n in l]
297 return [(t, n) for r, t, n in l]
298
298
299 def nodetags(self, node):
299 def nodetags(self, node):
300 '''return the tags associated with a node'''
300 '''return the tags associated with a node'''
301 if not self.nodetagscache:
301 if not self.nodetagscache:
302 self.nodetagscache = {}
302 self.nodetagscache = {}
303 for t, n in self.tags().items():
303 for t, n in self.tags().items():
304 self.nodetagscache.setdefault(n, []).append(t)
304 self.nodetagscache.setdefault(n, []).append(t)
305 return self.nodetagscache.get(node, [])
305 return self.nodetagscache.get(node, [])
306
306
307 def branchtags(self):
307 def branchtags(self):
308 if self.branchcache != None:
308 if self.branchcache != None:
309 return self.branchcache
309 return self.branchcache
310
310
311 self.branchcache = {} # avoid recursion in changectx
311 self.branchcache = {} # avoid recursion in changectx
312
312
313 partial, last, lrev = self._readbranchcache()
313 partial, last, lrev = self._readbranchcache()
314
314
315 tiprev = self.changelog.count() - 1
315 tiprev = self.changelog.count() - 1
316 if lrev != tiprev:
316 if lrev != tiprev:
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 self._updatebranchcache(partial, lrev+1, tiprev+1)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318 self._writebranchcache(partial, self.changelog.tip(), tiprev)
319
319
320 self.branchcache = partial
320 self.branchcache = partial
321 return self.branchcache
321 return self.branchcache
322
322
323 def _readbranchcache(self):
323 def _readbranchcache(self):
324 partial = {}
324 partial = {}
325 try:
325 try:
326 f = self.opener("branches.cache")
326 f = self.opener("branches.cache")
327 last, lrev = f.readline().rstrip().split(" ", 1)
327 lines = f.read().split('\n')
328 f.close()
329 last, lrev = lines.pop(0).rstrip().split(" ", 1)
328 last, lrev = bin(last), int(lrev)
330 last, lrev = bin(last), int(lrev)
329 if (lrev < self.changelog.count() and
331 if (lrev < self.changelog.count() and
330 self.changelog.node(lrev) == last): # sanity check
332 self.changelog.node(lrev) == last): # sanity check
331 for l in f:
333 for l in lines:
334 if not l: continue
332 node, label = l.rstrip().split(" ", 1)
335 node, label = l.rstrip().split(" ", 1)
333 partial[label] = bin(node)
336 partial[label] = bin(node)
334 else: # invalidate the cache
337 else: # invalidate the cache
335 last, lrev = nullid, nullrev
338 last, lrev = nullid, nullrev
336 f.close()
337 except IOError:
339 except IOError:
338 last, lrev = nullid, nullrev
340 last, lrev = nullid, nullrev
339 return partial, last, lrev
341 return partial, last, lrev
340
342
341 def _writebranchcache(self, branches, tip, tiprev):
343 def _writebranchcache(self, branches, tip, tiprev):
342 try:
344 try:
343 f = self.opener("branches.cache", "w")
345 f = self.opener("branches.cache", "w")
344 f.write("%s %s\n" % (hex(tip), tiprev))
346 f.write("%s %s\n" % (hex(tip), tiprev))
345 for label, node in branches.iteritems():
347 for label, node in branches.iteritems():
346 f.write("%s %s\n" % (hex(node), label))
348 f.write("%s %s\n" % (hex(node), label))
347 except IOError:
349 except IOError:
348 pass
350 pass
349
351
350 def _updatebranchcache(self, partial, start, end):
352 def _updatebranchcache(self, partial, start, end):
351 for r in xrange(start, end):
353 for r in xrange(start, end):
352 c = self.changectx(r)
354 c = self.changectx(r)
353 b = c.branch()
355 b = c.branch()
354 if b:
356 if b:
355 partial[b] = c.node()
357 partial[b] = c.node()
356
358
357 def lookup(self, key):
359 def lookup(self, key):
358 if key == '.':
360 if key == '.':
359 key = self.dirstate.parents()[0]
361 key = self.dirstate.parents()[0]
360 if key == nullid:
362 if key == nullid:
361 raise repo.RepoError(_("no revision checked out"))
363 raise repo.RepoError(_("no revision checked out"))
362 n = self.changelog._match(key)
364 n = self.changelog._match(key)
363 if n:
365 if n:
364 return n
366 return n
365 if key in self.tags():
367 if key in self.tags():
366 return self.tags()[key]
368 return self.tags()[key]
367 if key in self.branchtags():
369 if key in self.branchtags():
368 return self.branchtags()[key]
370 return self.branchtags()[key]
369 n = self.changelog._partialmatch(key)
371 n = self.changelog._partialmatch(key)
370 if n:
372 if n:
371 return n
373 return n
372 raise repo.RepoError(_("unknown revision '%s'") % key)
374 raise repo.RepoError(_("unknown revision '%s'") % key)
373
375
374 def dev(self):
376 def dev(self):
375 return os.lstat(self.path).st_dev
377 return os.lstat(self.path).st_dev
376
378
377 def local(self):
379 def local(self):
378 return True
380 return True
379
381
380 def join(self, f):
382 def join(self, f):
381 return os.path.join(self.path, f)
383 return os.path.join(self.path, f)
382
384
383 def sjoin(self, f):
385 def sjoin(self, f):
384 return os.path.join(self.path, f)
386 return os.path.join(self.path, f)
385
387
386 def wjoin(self, f):
388 def wjoin(self, f):
387 return os.path.join(self.root, f)
389 return os.path.join(self.root, f)
388
390
389 def file(self, f):
391 def file(self, f):
390 if f[0] == '/':
392 if f[0] == '/':
391 f = f[1:]
393 f = f[1:]
392 return filelog.filelog(self.sopener, f, self.revlogversion)
394 return filelog.filelog(self.sopener, f, self.revlogversion)
393
395
394 def changectx(self, changeid=None):
396 def changectx(self, changeid=None):
395 return context.changectx(self, changeid)
397 return context.changectx(self, changeid)
396
398
397 def workingctx(self):
399 def workingctx(self):
398 return context.workingctx(self)
400 return context.workingctx(self)
399
401
400 def parents(self, changeid=None):
402 def parents(self, changeid=None):
401 '''
403 '''
402 get list of changectxs for parents of changeid or working directory
404 get list of changectxs for parents of changeid or working directory
403 '''
405 '''
404 if changeid is None:
406 if changeid is None:
405 pl = self.dirstate.parents()
407 pl = self.dirstate.parents()
406 else:
408 else:
407 n = self.changelog.lookup(changeid)
409 n = self.changelog.lookup(changeid)
408 pl = self.changelog.parents(n)
410 pl = self.changelog.parents(n)
409 if pl[1] == nullid:
411 if pl[1] == nullid:
410 return [self.changectx(pl[0])]
412 return [self.changectx(pl[0])]
411 return [self.changectx(pl[0]), self.changectx(pl[1])]
413 return [self.changectx(pl[0]), self.changectx(pl[1])]
412
414
413 def filectx(self, path, changeid=None, fileid=None):
415 def filectx(self, path, changeid=None, fileid=None):
414 """changeid can be a changeset revision, node, or tag.
416 """changeid can be a changeset revision, node, or tag.
415 fileid can be a file revision or node."""
417 fileid can be a file revision or node."""
416 return context.filectx(self, path, changeid, fileid)
418 return context.filectx(self, path, changeid, fileid)
417
419
418 def getcwd(self):
420 def getcwd(self):
419 return self.dirstate.getcwd()
421 return self.dirstate.getcwd()
420
422
421 def wfile(self, f, mode='r'):
423 def wfile(self, f, mode='r'):
422 return self.wopener(f, mode)
424 return self.wopener(f, mode)
423
425
424 def wread(self, filename):
426 def wread(self, filename):
425 if self.encodepats == None:
427 if self.encodepats == None:
426 l = []
428 l = []
427 for pat, cmd in self.ui.configitems("encode"):
429 for pat, cmd in self.ui.configitems("encode"):
428 mf = util.matcher(self.root, "", [pat], [], [])[1]
430 mf = util.matcher(self.root, "", [pat], [], [])[1]
429 l.append((mf, cmd))
431 l.append((mf, cmd))
430 self.encodepats = l
432 self.encodepats = l
431
433
432 data = self.wopener(filename, 'r').read()
434 data = self.wopener(filename, 'r').read()
433
435
434 for mf, cmd in self.encodepats:
436 for mf, cmd in self.encodepats:
435 if mf(filename):
437 if mf(filename):
436 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
438 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
437 data = util.filter(data, cmd)
439 data = util.filter(data, cmd)
438 break
440 break
439
441
440 return data
442 return data
441
443
442 def wwrite(self, filename, data, fd=None):
444 def wwrite(self, filename, data, fd=None):
443 if self.decodepats == None:
445 if self.decodepats == None:
444 l = []
446 l = []
445 for pat, cmd in self.ui.configitems("decode"):
447 for pat, cmd in self.ui.configitems("decode"):
446 mf = util.matcher(self.root, "", [pat], [], [])[1]
448 mf = util.matcher(self.root, "", [pat], [], [])[1]
447 l.append((mf, cmd))
449 l.append((mf, cmd))
448 self.decodepats = l
450 self.decodepats = l
449
451
450 for mf, cmd in self.decodepats:
452 for mf, cmd in self.decodepats:
451 if mf(filename):
453 if mf(filename):
452 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
454 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
453 data = util.filter(data, cmd)
455 data = util.filter(data, cmd)
454 break
456 break
455
457
456 if fd:
458 if fd:
457 return fd.write(data)
459 return fd.write(data)
458 return self.wopener(filename, 'w').write(data)
460 return self.wopener(filename, 'w').write(data)
459
461
460 def transaction(self):
462 def transaction(self):
461 tr = self.transhandle
463 tr = self.transhandle
462 if tr != None and tr.running():
464 if tr != None and tr.running():
463 return tr.nest()
465 return tr.nest()
464
466
465 # save dirstate for rollback
467 # save dirstate for rollback
466 try:
468 try:
467 ds = self.opener("dirstate").read()
469 ds = self.opener("dirstate").read()
468 except IOError:
470 except IOError:
469 ds = ""
471 ds = ""
470 self.opener("journal.dirstate", "w").write(ds)
472 self.opener("journal.dirstate", "w").write(ds)
471
473
472 tr = transaction.transaction(self.ui.warn, self.sopener,
474 tr = transaction.transaction(self.ui.warn, self.sopener,
473 self.sjoin("journal"),
475 self.sjoin("journal"),
474 aftertrans(self.path))
476 aftertrans(self.path))
475 self.transhandle = tr
477 self.transhandle = tr
476 return tr
478 return tr
477
479
478 def recover(self):
480 def recover(self):
479 l = self.lock()
481 l = self.lock()
480 if os.path.exists(self.sjoin("journal")):
482 if os.path.exists(self.sjoin("journal")):
481 self.ui.status(_("rolling back interrupted transaction\n"))
483 self.ui.status(_("rolling back interrupted transaction\n"))
482 transaction.rollback(self.sopener, self.sjoin("journal"))
484 transaction.rollback(self.sopener, self.sjoin("journal"))
483 self.reload()
485 self.reload()
484 return True
486 return True
485 else:
487 else:
486 self.ui.warn(_("no interrupted transaction available\n"))
488 self.ui.warn(_("no interrupted transaction available\n"))
487 return False
489 return False
488
490
489 def rollback(self, wlock=None):
491 def rollback(self, wlock=None):
490 if not wlock:
492 if not wlock:
491 wlock = self.wlock()
493 wlock = self.wlock()
492 l = self.lock()
494 l = self.lock()
493 if os.path.exists(self.sjoin("undo")):
495 if os.path.exists(self.sjoin("undo")):
494 self.ui.status(_("rolling back last transaction\n"))
496 self.ui.status(_("rolling back last transaction\n"))
495 transaction.rollback(self.sopener, self.sjoin("undo"))
497 transaction.rollback(self.sopener, self.sjoin("undo"))
496 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
498 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
497 self.reload()
499 self.reload()
498 self.wreload()
500 self.wreload()
499 else:
501 else:
500 self.ui.warn(_("no rollback information available\n"))
502 self.ui.warn(_("no rollback information available\n"))
501
503
502 def wreload(self):
504 def wreload(self):
503 self.dirstate.read()
505 self.dirstate.read()
504
506
505 def reload(self):
507 def reload(self):
506 self.changelog.load()
508 self.changelog.load()
507 self.manifest.load()
509 self.manifest.load()
508 self.tagscache = None
510 self.tagscache = None
509 self.nodetagscache = None
511 self.nodetagscache = None
510
512
511 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
513 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
512 desc=None):
514 desc=None):
513 try:
515 try:
514 l = lock.lock(lockname, 0, releasefn, desc=desc)
516 l = lock.lock(lockname, 0, releasefn, desc=desc)
515 except lock.LockHeld, inst:
517 except lock.LockHeld, inst:
516 if not wait:
518 if not wait:
517 raise
519 raise
518 self.ui.warn(_("waiting for lock on %s held by %s\n") %
520 self.ui.warn(_("waiting for lock on %s held by %s\n") %
519 (desc, inst.args[0]))
521 (desc, inst.args[0]))
520 # default to 600 seconds timeout
522 # default to 600 seconds timeout
521 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
523 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
522 releasefn, desc=desc)
524 releasefn, desc=desc)
523 if acquirefn:
525 if acquirefn:
524 acquirefn()
526 acquirefn()
525 return l
527 return l
526
528
527 def lock(self, wait=1):
529 def lock(self, wait=1):
528 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
530 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
529 desc=_('repository %s') % self.origroot)
531 desc=_('repository %s') % self.origroot)
530
532
531 def wlock(self, wait=1):
533 def wlock(self, wait=1):
532 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
534 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
533 self.wreload,
535 self.wreload,
534 desc=_('working directory of %s') % self.origroot)
536 desc=_('working directory of %s') % self.origroot)
535
537
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn         - name of the file being committed
        manifest1  - manifest of the first parent
        manifest2  - manifest of the second parent ({} when not a merge)
        linkrev    - changelog revision the new filelog entry links to
        transaction - transaction journaling the filelog write
        changelist - fn is appended here when a new filelog entry is
                     actually created

        Returns the filelog node for fn (an existing one if the file is
        unmodified relative to its parent).
        """

        t = self.wread(fn)     # working-copy contents
        fl = self.file(fn)     # filelog for fn
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file was copied/renamed; record the source and the
            # revision of the source in the filelog metadata
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                # NOTE(review): unlike the branches above there is no
                # nullid default here -- hex() would fail on None if cp
                # were absent from manifest2; presumably cp is always
                # present on this path -- confirm
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
575
577
576 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
578 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
577 if p1 is None:
579 if p1 is None:
578 p1, p2 = self.dirstate.parents()
580 p1, p2 = self.dirstate.parents()
579 return self.commit(files=files, text=text, user=user, date=date,
581 return self.commit(files=files, text=text, user=user, date=date,
580 p1=p1, p2=p2, wlock=wlock)
582 p1=p1, p2=p2, wlock=wlock)
581
583
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Commit changes and return the new changeset node, or None if
        nothing was committed.

        When p1 is given (the rawcommit path) the dirstate is bypassed
        and *files* are committed as-is; otherwise the dirstate supplies
        the parents and the set of changed/removed files.  An editor is
        spawned when no text is given or force_editor is set.  The
        mutable default for extra is safe: it is copied before use.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # commit only the listed files, classified by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                # no explicit list: commit everything status reports
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it was already at p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "")
            # bail out early when there is literally nothing to commit
            # (no file changes, no merge, no branch change)
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = []
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
                new.append(f)
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit tolerates unreadable files: treat as removed
                    remove.append(f)

        # update manifest
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            # NOTE(review): not in try/finally -- if the editor raises,
            # the process cwd is left at self.root
            os.chdir(olddir)

        # normalize the message: strip trailing whitespace and leading
        # blank lines; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
713
715
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        NOTE(review): in the changeset branch below, matched manifest
        entries are also yielded with src 'm' -- the 'f'/'m' distinction
        only applies to the dirstate walk.
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # safe despite mutating during iteration: the
                        # break exits the loop before the next step
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was not found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # no node: walk the working directory via the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
747
749
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        ignored and clean are only populated when the corresponding
        list_* flag is set.
        """

        def fcmp(fn, mf):
            # compare working-copy contents of fn against its revision
            # in manifest mf; true when they differ
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            # try to grab the wlock opportunistically so lookups can be
            # written back to the dirstate; proceed without it on failure
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            # only record the result when we hold the lock
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # an empty mf2 entry means "working dir file"; fall
                    # back to a content compare in that case
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever survives in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
844
846
845 def add(self, list, wlock=None):
847 def add(self, list, wlock=None):
846 if not wlock:
848 if not wlock:
847 wlock = self.wlock()
849 wlock = self.wlock()
848 for f in list:
850 for f in list:
849 p = self.wjoin(f)
851 p = self.wjoin(f)
850 if not os.path.exists(p):
852 if not os.path.exists(p):
851 self.ui.warn(_("%s does not exist!\n") % f)
853 self.ui.warn(_("%s does not exist!\n") % f)
852 elif not os.path.isfile(p):
854 elif not os.path.isfile(p):
853 self.ui.warn(_("%s not added: only files supported currently\n")
855 self.ui.warn(_("%s not added: only files supported currently\n")
854 % f)
856 % f)
855 elif self.dirstate.state(f) in 'an':
857 elif self.dirstate.state(f) in 'an':
856 self.ui.warn(_("%s already tracked!\n") % f)
858 self.ui.warn(_("%s already tracked!\n") % f)
857 else:
859 else:
858 self.dirstate.update([f], "a")
860 self.dirstate.update([f], "a")
859
861
860 def forget(self, list, wlock=None):
862 def forget(self, list, wlock=None):
861 if not wlock:
863 if not wlock:
862 wlock = self.wlock()
864 wlock = self.wlock()
863 for f in list:
865 for f in list:
864 if self.dirstate.state(f) not in 'ai':
866 if self.dirstate.state(f) not in 'ai':
865 self.ui.warn(_("%s not added!\n") % f)
867 self.ui.warn(_("%s not added!\n") % f)
866 else:
868 else:
867 self.dirstate.forget([f])
869 self.dirstate.forget([f])
868
870
    def remove(self, list, unlink=False, wlock=None):
        """Schedule files for removal at the next commit.

        When unlink is true the working copies are deleted first; a file
        that is already missing is not an error (ENOENT is ignored).
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # tolerate already-deleted files, re-raise the rest
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # without unlink, the file must be gone from the working
                # dir before it can be marked removed
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
889
891
890 def undelete(self, list, wlock=None):
892 def undelete(self, list, wlock=None):
891 p = self.dirstate.parents()[0]
893 p = self.dirstate.parents()[0]
892 mn = self.changelog.read(p)[0]
894 mn = self.changelog.read(p)[0]
893 m = self.manifest.read(mn)
895 m = self.manifest.read(mn)
894 if not wlock:
896 if not wlock:
895 wlock = self.wlock()
897 wlock = self.wlock()
896 for f in list:
898 for f in list:
897 if self.dirstate.state(f) not in "r":
899 if self.dirstate.state(f) not in "r":
898 self.ui.warn("%s not removed!\n" % f)
900 self.ui.warn("%s not removed!\n" % f)
899 else:
901 else:
900 t = self.file(f).read(m[f])
902 t = self.file(f).read(m[f])
901 self.wwrite(f, t)
903 self.wwrite(f, t)
902 util.set_exec(self.wjoin(f), m.execf(f))
904 util.set_exec(self.wjoin(f), m.execf(f))
903 self.dirstate.update([f], "n")
905 self.dirstate.update([f], "n")
904
906
905 def copy(self, source, dest, wlock=None):
907 def copy(self, source, dest, wlock=None):
906 p = self.wjoin(dest)
908 p = self.wjoin(dest)
907 if not os.path.exists(p):
909 if not os.path.exists(p):
908 self.ui.warn(_("%s does not exist!\n") % dest)
910 self.ui.warn(_("%s does not exist!\n") % dest)
909 elif not os.path.isfile(p):
911 elif not os.path.isfile(p):
910 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
912 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
911 else:
913 else:
912 if not wlock:
914 if not wlock:
913 wlock = self.wlock()
915 wlock = self.wlock()
914 if self.dirstate.state(dest) == '?':
916 if self.dirstate.state(dest) == '?':
915 self.dirstate.update([dest], "a")
917 self.dirstate.update([dest], "a")
916 self.dirstate.copy(source, dest)
918 self.dirstate.copy(source, dest)
917
919
918 def heads(self, start=None):
920 def heads(self, start=None):
919 heads = self.changelog.heads(start)
921 heads = self.changelog.heads(start)
920 # sort the output in rev descending order
922 # sort the output in rev descending order
921 heads = [(-self.changelog.rev(h), h) for h in heads]
923 heads = [(-self.changelog.rev(h), h) for h in heads]
922 heads.sort()
924 heads.sort()
923 return [n for (r, n) in heads]
925 return [n for (r, n) in heads]
924
926
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph: a->b->c->d->e
    # \ /
    # aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (ie checkout of a specific
    # branch).
    #
    # passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the list of branch tags visible from it
        (see the block comment above for the full semantics)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}   # node -> {tagged ancestor node: 1}
        merges = []     # pending (second parent, found-so-far) pairs
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                # resume a deferred merge branch, inheriting its context
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # record this tagged node as visible from every
                        # node collected so far on this walk
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is reached
                    if branch in tags:
                        continue
                seen[n] = 1
                # defer the second parent of a merge for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # set of tagged nodes reachable from node, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    # b is not shadowed by any other tag: keep its tags
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1030
1032
1031 def branches(self, nodes):
1033 def branches(self, nodes):
1032 if not nodes:
1034 if not nodes:
1033 nodes = [self.changelog.tip()]
1035 nodes = [self.changelog.tip()]
1034 b = []
1036 b = []
1035 for n in nodes:
1037 for n in nodes:
1036 t = n
1038 t = n
1037 while 1:
1039 while 1:
1038 p = self.changelog.parents(n)
1040 p = self.changelog.parents(n)
1039 if p[1] != nullid or p[0] == nullid:
1041 if p[1] != nullid or p[0] == nullid:
1040 b.append((t, n, p[0], p[1]))
1042 b.append((t, n, p[0], p[1]))
1041 break
1043 break
1042 n = p[0]
1044 n = p[0]
1043 return b
1045 return b
1044
1046
1045 def between(self, pairs):
1047 def between(self, pairs):
1046 r = []
1048 r = []
1047
1049
1048 for top, bottom in pairs:
1050 for top, bottom in pairs:
1049 n, l, i = top, [], 0
1051 n, l, i = top, [], 0
1050 f = 1
1052 f = 1
1051
1053
1052 while n != bottom:
1054 while n != bottom:
1053 p = self.changelog.parents(n)[0]
1055 p = self.changelog.parents(n)[0]
1054 if i == f:
1056 if i == f:
1055 l.append(n)
1057 l.append(n)
1056 f = f * 2
1058 f = f * 2
1057 n = p
1059 n = p
1058 i += 1
1060 i += 1
1059
1061
1060 r.append(l)
1062 r.append(l)
1061
1063
1062 return r
1064 return r
1063
1065
1064 def findincoming(self, remote, base=None, heads=None, force=False):
1066 def findincoming(self, remote, base=None, heads=None, force=False):
1065 """Return list of roots of the subsets of missing nodes from remote
1067 """Return list of roots of the subsets of missing nodes from remote
1066
1068
1067 If base dict is specified, assume that these nodes and their parents
1069 If base dict is specified, assume that these nodes and their parents
1068 exist on the remote side and that no child of a node of base exists
1070 exist on the remote side and that no child of a node of base exists
1069 in both remote and self.
1071 in both remote and self.
1070 Furthermore base will be updated to include the nodes that exists
1072 Furthermore base will be updated to include the nodes that exists
1071 in self and remote but no children exists in self and remote.
1073 in self and remote but no children exists in self and remote.
1072 If a list of heads is specified, return only nodes which are heads
1074 If a list of heads is specified, return only nodes which are heads
1073 or ancestors of these heads.
1075 or ancestors of these heads.
1074
1076
1075 All the ancestors of base are in self and in remote.
1077 All the ancestors of base are in self and in remote.
1076 All the descendants of the list returned are missing in self.
1078 All the descendants of the list returned are missing in self.
1077 (and so we know that the rest of the nodes are missing in remote, see
1079 (and so we know that the rest of the nodes are missing in remote, see
1078 outgoing)
1080 outgoing)
1079 """
1081 """
1080 m = self.changelog.nodemap
1082 m = self.changelog.nodemap
1081 search = []
1083 search = []
1082 fetch = {}
1084 fetch = {}
1083 seen = {}
1085 seen = {}
1084 seenbranch = {}
1086 seenbranch = {}
1085 if base == None:
1087 if base == None:
1086 base = {}
1088 base = {}
1087
1089
1088 if not heads:
1090 if not heads:
1089 heads = remote.heads()
1091 heads = remote.heads()
1090
1092
1091 if self.changelog.tip() == nullid:
1093 if self.changelog.tip() == nullid:
1092 base[nullid] = 1
1094 base[nullid] = 1
1093 if heads != [nullid]:
1095 if heads != [nullid]:
1094 return [nullid]
1096 return [nullid]
1095 return []
1097 return []
1096
1098
1097 # assume we're closer to the tip than the root
1099 # assume we're closer to the tip than the root
1098 # and start by examining the heads
1100 # and start by examining the heads
1099 self.ui.status(_("searching for changes\n"))
1101 self.ui.status(_("searching for changes\n"))
1100
1102
1101 unknown = []
1103 unknown = []
1102 for h in heads:
1104 for h in heads:
1103 if h not in m:
1105 if h not in m:
1104 unknown.append(h)
1106 unknown.append(h)
1105 else:
1107 else:
1106 base[h] = 1
1108 base[h] = 1
1107
1109
1108 if not unknown:
1110 if not unknown:
1109 return []
1111 return []
1110
1112
1111 req = dict.fromkeys(unknown)
1113 req = dict.fromkeys(unknown)
1112 reqcnt = 0
1114 reqcnt = 0
1113
1115
1114 # search through remote branches
1116 # search through remote branches
1115 # a 'branch' here is a linear segment of history, with four parts:
1117 # a 'branch' here is a linear segment of history, with four parts:
1116 # head, root, first parent, second parent
1118 # head, root, first parent, second parent
1117 # (a branch always has two parents (or none) by definition)
1119 # (a branch always has two parents (or none) by definition)
1118 unknown = remote.branches(unknown)
1120 unknown = remote.branches(unknown)
1119 while unknown:
1121 while unknown:
1120 r = []
1122 r = []
1121 while unknown:
1123 while unknown:
1122 n = unknown.pop(0)
1124 n = unknown.pop(0)
1123 if n[0] in seen:
1125 if n[0] in seen:
1124 continue
1126 continue
1125
1127
1126 self.ui.debug(_("examining %s:%s\n")
1128 self.ui.debug(_("examining %s:%s\n")
1127 % (short(n[0]), short(n[1])))
1129 % (short(n[0]), short(n[1])))
1128 if n[0] == nullid: # found the end of the branch
1130 if n[0] == nullid: # found the end of the branch
1129 pass
1131 pass
1130 elif n in seenbranch:
1132 elif n in seenbranch:
1131 self.ui.debug(_("branch already found\n"))
1133 self.ui.debug(_("branch already found\n"))
1132 continue
1134 continue
1133 elif n[1] and n[1] in m: # do we know the base?
1135 elif n[1] and n[1] in m: # do we know the base?
1134 self.ui.debug(_("found incomplete branch %s:%s\n")
1136 self.ui.debug(_("found incomplete branch %s:%s\n")
1135 % (short(n[0]), short(n[1])))
1137 % (short(n[0]), short(n[1])))
1136 search.append(n) # schedule branch range for scanning
1138 search.append(n) # schedule branch range for scanning
1137 seenbranch[n] = 1
1139 seenbranch[n] = 1
1138 else:
1140 else:
1139 if n[1] not in seen and n[1] not in fetch:
1141 if n[1] not in seen and n[1] not in fetch:
1140 if n[2] in m and n[3] in m:
1142 if n[2] in m and n[3] in m:
1141 self.ui.debug(_("found new changeset %s\n") %
1143 self.ui.debug(_("found new changeset %s\n") %
1142 short(n[1]))
1144 short(n[1]))
1143 fetch[n[1]] = 1 # earliest unknown
1145 fetch[n[1]] = 1 # earliest unknown
1144 for p in n[2:4]:
1146 for p in n[2:4]:
1145 if p in m:
1147 if p in m:
1146 base[p] = 1 # latest known
1148 base[p] = 1 # latest known
1147
1149
1148 for p in n[2:4]:
1150 for p in n[2:4]:
1149 if p not in req and p not in m:
1151 if p not in req and p not in m:
1150 r.append(p)
1152 r.append(p)
1151 req[p] = 1
1153 req[p] = 1
1152 seen[n[0]] = 1
1154 seen[n[0]] = 1
1153
1155
1154 if r:
1156 if r:
1155 reqcnt += 1
1157 reqcnt += 1
1156 self.ui.debug(_("request %d: %s\n") %
1158 self.ui.debug(_("request %d: %s\n") %
1157 (reqcnt, " ".join(map(short, r))))
1159 (reqcnt, " ".join(map(short, r))))
1158 for p in xrange(0, len(r), 10):
1160 for p in xrange(0, len(r), 10):
1159 for b in remote.branches(r[p:p+10]):
1161 for b in remote.branches(r[p:p+10]):
1160 self.ui.debug(_("received %s:%s\n") %
1162 self.ui.debug(_("received %s:%s\n") %
1161 (short(b[0]), short(b[1])))
1163 (short(b[0]), short(b[1])))
1162 unknown.append(b)
1164 unknown.append(b)
1163
1165
1164 # do binary search on the branches we found
1166 # do binary search on the branches we found
1165 while search:
1167 while search:
1166 n = search.pop(0)
1168 n = search.pop(0)
1167 reqcnt += 1
1169 reqcnt += 1
1168 l = remote.between([(n[0], n[1])])[0]
1170 l = remote.between([(n[0], n[1])])[0]
1169 l.append(n[1])
1171 l.append(n[1])
1170 p = n[0]
1172 p = n[0]
1171 f = 1
1173 f = 1
1172 for i in l:
1174 for i in l:
1173 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1175 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1174 if i in m:
1176 if i in m:
1175 if f <= 2:
1177 if f <= 2:
1176 self.ui.debug(_("found new branch changeset %s\n") %
1178 self.ui.debug(_("found new branch changeset %s\n") %
1177 short(p))
1179 short(p))
1178 fetch[p] = 1
1180 fetch[p] = 1
1179 base[i] = 1
1181 base[i] = 1
1180 else:
1182 else:
1181 self.ui.debug(_("narrowed branch search to %s:%s\n")
1183 self.ui.debug(_("narrowed branch search to %s:%s\n")
1182 % (short(p), short(i)))
1184 % (short(p), short(i)))
1183 search.append((p, i))
1185 search.append((p, i))
1184 break
1186 break
1185 p, f = i, f * 2
1187 p, f = i, f * 2
1186
1188
1187 # sanity check our fetch list
1189 # sanity check our fetch list
1188 for f in fetch.keys():
1190 for f in fetch.keys():
1189 if f in m:
1191 if f in m:
1190 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1192 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1191
1193
1192 if base.keys() == [nullid]:
1194 if base.keys() == [nullid]:
1193 if force:
1195 if force:
1194 self.ui.warn(_("warning: repository is unrelated\n"))
1196 self.ui.warn(_("warning: repository is unrelated\n"))
1195 else:
1197 else:
1196 raise util.Abort(_("repository is unrelated"))
1198 raise util.Abort(_("repository is unrelated"))
1197
1199
1198 self.ui.debug(_("found new changesets starting at ") +
1200 self.ui.debug(_("found new changesets starting at ") +
1199 " ".join([short(f) for f in fetch]) + "\n")
1201 " ".join([short(f) for f in fetch]) + "\n")
1200
1202
1201 self.ui.debug(_("%d total queries\n") % reqcnt)
1203 self.ui.debug(_("%d total queries\n") % reqcnt)
1202
1204
1203 return fetch.keys()
1205 return fetch.keys()
1204
1206
1205 def findoutgoing(self, remote, base=None, heads=None, force=False):
1207 def findoutgoing(self, remote, base=None, heads=None, force=False):
1206 """Return list of nodes that are roots of subsets not in remote
1208 """Return list of nodes that are roots of subsets not in remote
1207
1209
1208 If base dict is specified, assume that these nodes and their parents
1210 If base dict is specified, assume that these nodes and their parents
1209 exist on the remote side.
1211 exist on the remote side.
1210 If a list of heads is specified, return only nodes which are heads
1212 If a list of heads is specified, return only nodes which are heads
1211 or ancestors of these heads, and return a second element which
1213 or ancestors of these heads, and return a second element which
1212 contains all remote heads which get new children.
1214 contains all remote heads which get new children.
1213 """
1215 """
1214 if base == None:
1216 if base == None:
1215 base = {}
1217 base = {}
1216 self.findincoming(remote, base, heads, force=force)
1218 self.findincoming(remote, base, heads, force=force)
1217
1219
1218 self.ui.debug(_("common changesets up to ")
1220 self.ui.debug(_("common changesets up to ")
1219 + " ".join(map(short, base.keys())) + "\n")
1221 + " ".join(map(short, base.keys())) + "\n")
1220
1222
1221 remain = dict.fromkeys(self.changelog.nodemap)
1223 remain = dict.fromkeys(self.changelog.nodemap)
1222
1224
1223 # prune everything remote has from the tree
1225 # prune everything remote has from the tree
1224 del remain[nullid]
1226 del remain[nullid]
1225 remove = base.keys()
1227 remove = base.keys()
1226 while remove:
1228 while remove:
1227 n = remove.pop(0)
1229 n = remove.pop(0)
1228 if n in remain:
1230 if n in remain:
1229 del remain[n]
1231 del remain[n]
1230 for p in self.changelog.parents(n):
1232 for p in self.changelog.parents(n):
1231 remove.append(p)
1233 remove.append(p)
1232
1234
1233 # find every node whose parents have been pruned
1235 # find every node whose parents have been pruned
1234 subset = []
1236 subset = []
1235 # find every remote head that will get new children
1237 # find every remote head that will get new children
1236 updated_heads = {}
1238 updated_heads = {}
1237 for n in remain:
1239 for n in remain:
1238 p1, p2 = self.changelog.parents(n)
1240 p1, p2 = self.changelog.parents(n)
1239 if p1 not in remain and p2 not in remain:
1241 if p1 not in remain and p2 not in remain:
1240 subset.append(n)
1242 subset.append(n)
1241 if heads:
1243 if heads:
1242 if p1 in heads:
1244 if p1 in heads:
1243 updated_heads[p1] = True
1245 updated_heads[p1] = True
1244 if p2 in heads:
1246 if p2 in heads:
1245 updated_heads[p2] = True
1247 updated_heads[p2] = True
1246
1248
1247 # this is the set of all roots we have to push
1249 # this is the set of all roots we have to push
1248 if heads:
1250 if heads:
1249 return subset, updated_heads.keys()
1251 return subset, updated_heads.keys()
1250 else:
1252 else:
1251 return subset
1253 return subset
1252
1254
1253 def pull(self, remote, heads=None, force=False, lock=None):
1255 def pull(self, remote, heads=None, force=False, lock=None):
1254 mylock = False
1256 mylock = False
1255 if not lock:
1257 if not lock:
1256 lock = self.lock()
1258 lock = self.lock()
1257 mylock = True
1259 mylock = True
1258
1260
1259 try:
1261 try:
1260 fetch = self.findincoming(remote, force=force)
1262 fetch = self.findincoming(remote, force=force)
1261 if fetch == [nullid]:
1263 if fetch == [nullid]:
1262 self.ui.status(_("requesting all changes\n"))
1264 self.ui.status(_("requesting all changes\n"))
1263
1265
1264 if not fetch:
1266 if not fetch:
1265 self.ui.status(_("no changes found\n"))
1267 self.ui.status(_("no changes found\n"))
1266 return 0
1268 return 0
1267
1269
1268 if heads is None:
1270 if heads is None:
1269 cg = remote.changegroup(fetch, 'pull')
1271 cg = remote.changegroup(fetch, 'pull')
1270 else:
1272 else:
1271 if 'changegroupsubset' not in remote.capabilities:
1273 if 'changegroupsubset' not in remote.capabilities:
1272 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1274 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1273 cg = remote.changegroupsubset(fetch, heads, 'pull')
1275 cg = remote.changegroupsubset(fetch, heads, 'pull')
1274 return self.addchangegroup(cg, 'pull', remote.url())
1276 return self.addchangegroup(cg, 'pull', remote.url())
1275 finally:
1277 finally:
1276 if mylock:
1278 if mylock:
1277 lock.release()
1279 lock.release()
1278
1280
1279 def push(self, remote, force=False, revs=None):
1281 def push(self, remote, force=False, revs=None):
1280 # there are two ways to push to remote repo:
1282 # there are two ways to push to remote repo:
1281 #
1283 #
1282 # addchangegroup assumes local user can lock remote
1284 # addchangegroup assumes local user can lock remote
1283 # repo (local filesystem, old ssh servers).
1285 # repo (local filesystem, old ssh servers).
1284 #
1286 #
1285 # unbundle assumes local user cannot lock remote repo (new ssh
1287 # unbundle assumes local user cannot lock remote repo (new ssh
1286 # servers, http servers).
1288 # servers, http servers).
1287
1289
1288 if remote.capable('unbundle'):
1290 if remote.capable('unbundle'):
1289 return self.push_unbundle(remote, force, revs)
1291 return self.push_unbundle(remote, force, revs)
1290 return self.push_addchangegroup(remote, force, revs)
1292 return self.push_addchangegroup(remote, force, revs)
1291
1293
1292 def prepush(self, remote, force, revs):
1294 def prepush(self, remote, force, revs):
1293 base = {}
1295 base = {}
1294 remote_heads = remote.heads()
1296 remote_heads = remote.heads()
1295 inc = self.findincoming(remote, base, remote_heads, force=force)
1297 inc = self.findincoming(remote, base, remote_heads, force=force)
1296 if not force and inc:
1298 if not force and inc:
1297 self.ui.warn(_("abort: unsynced remote changes!\n"))
1299 self.ui.warn(_("abort: unsynced remote changes!\n"))
1298 self.ui.status(_("(did you forget to sync?"
1300 self.ui.status(_("(did you forget to sync?"
1299 " use push -f to force)\n"))
1301 " use push -f to force)\n"))
1300 return None, 1
1302 return None, 1
1301
1303
1302 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1304 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1303 if revs is not None:
1305 if revs is not None:
1304 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1306 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1305 else:
1307 else:
1306 bases, heads = update, self.changelog.heads()
1308 bases, heads = update, self.changelog.heads()
1307
1309
1308 if not bases:
1310 if not bases:
1309 self.ui.status(_("no changes found\n"))
1311 self.ui.status(_("no changes found\n"))
1310 return None, 1
1312 return None, 1
1311 elif not force:
1313 elif not force:
1312 # FIXME we don't properly detect creation of new heads
1314 # FIXME we don't properly detect creation of new heads
1313 # in the push -r case, assume the user knows what he's doing
1315 # in the push -r case, assume the user knows what he's doing
1314 if not revs and len(remote_heads) < len(heads) \
1316 if not revs and len(remote_heads) < len(heads) \
1315 and remote_heads != [nullid]:
1317 and remote_heads != [nullid]:
1316 self.ui.warn(_("abort: push creates new remote branches!\n"))
1318 self.ui.warn(_("abort: push creates new remote branches!\n"))
1317 self.ui.status(_("(did you forget to merge?"
1319 self.ui.status(_("(did you forget to merge?"
1318 " use push -f to force)\n"))
1320 " use push -f to force)\n"))
1319 return None, 1
1321 return None, 1
1320
1322
1321 if revs is None:
1323 if revs is None:
1322 cg = self.changegroup(update, 'push')
1324 cg = self.changegroup(update, 'push')
1323 else:
1325 else:
1324 cg = self.changegroupsubset(update, revs, 'push')
1326 cg = self.changegroupsubset(update, revs, 'push')
1325 return cg, remote_heads
1327 return cg, remote_heads
1326
1328
1327 def push_addchangegroup(self, remote, force, revs):
1329 def push_addchangegroup(self, remote, force, revs):
1328 lock = remote.lock()
1330 lock = remote.lock()
1329
1331
1330 ret = self.prepush(remote, force, revs)
1332 ret = self.prepush(remote, force, revs)
1331 if ret[0] is not None:
1333 if ret[0] is not None:
1332 cg, remote_heads = ret
1334 cg, remote_heads = ret
1333 return remote.addchangegroup(cg, 'push', self.url())
1335 return remote.addchangegroup(cg, 'push', self.url())
1334 return ret[1]
1336 return ret[1]
1335
1337
1336 def push_unbundle(self, remote, force, revs):
1338 def push_unbundle(self, remote, force, revs):
1337 # local repo finds heads on server, finds out what revs it
1339 # local repo finds heads on server, finds out what revs it
1338 # must push. once revs transferred, if server finds it has
1340 # must push. once revs transferred, if server finds it has
1339 # different heads (someone else won commit/push race), server
1341 # different heads (someone else won commit/push race), server
1340 # aborts.
1342 # aborts.
1341
1343
1342 ret = self.prepush(remote, force, revs)
1344 ret = self.prepush(remote, force, revs)
1343 if ret[0] is not None:
1345 if ret[0] is not None:
1344 cg, remote_heads = ret
1346 cg, remote_heads = ret
1345 if force: remote_heads = ['force']
1347 if force: remote_heads = ['force']
1346 return remote.unbundle(cg, remote_heads, 'push')
1348 return remote.unbundle(cg, remote_heads, 'push')
1347 return ret[1]
1349 return ret[1]
1348
1350
1349 def changegroupinfo(self, nodes):
1351 def changegroupinfo(self, nodes):
1350 self.ui.note(_("%d changesets found\n") % len(nodes))
1352 self.ui.note(_("%d changesets found\n") % len(nodes))
1351 if self.ui.debugflag:
1353 if self.ui.debugflag:
1352 self.ui.debug(_("List of changesets:\n"))
1354 self.ui.debug(_("List of changesets:\n"))
1353 for node in nodes:
1355 for node in nodes:
1354 self.ui.debug("%s\n" % hex(node))
1356 self.ui.debug("%s\n" % hex(node))
1355
1357
1356 def changegroupsubset(self, bases, heads, source):
1358 def changegroupsubset(self, bases, heads, source):
1357 """This function generates a changegroup consisting of all the nodes
1359 """This function generates a changegroup consisting of all the nodes
1358 that are descendents of any of the bases, and ancestors of any of
1360 that are descendents of any of the bases, and ancestors of any of
1359 the heads.
1361 the heads.
1360
1362
1361 It is fairly complex as determining which filenodes and which
1363 It is fairly complex as determining which filenodes and which
1362 manifest nodes need to be included for the changeset to be complete
1364 manifest nodes need to be included for the changeset to be complete
1363 is non-trivial.
1365 is non-trivial.
1364
1366
1365 Another wrinkle is doing the reverse, figuring out which changeset in
1367 Another wrinkle is doing the reverse, figuring out which changeset in
1366 the changegroup a particular filenode or manifestnode belongs to."""
1368 the changegroup a particular filenode or manifestnode belongs to."""
1367
1369
1368 self.hook('preoutgoing', throw=True, source=source)
1370 self.hook('preoutgoing', throw=True, source=source)
1369
1371
1370 # Set up some initial variables
1372 # Set up some initial variables
1371 # Make it easy to refer to self.changelog
1373 # Make it easy to refer to self.changelog
1372 cl = self.changelog
1374 cl = self.changelog
1373 # msng is short for missing - compute the list of changesets in this
1375 # msng is short for missing - compute the list of changesets in this
1374 # changegroup.
1376 # changegroup.
1375 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1377 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1376 self.changegroupinfo(msng_cl_lst)
1378 self.changegroupinfo(msng_cl_lst)
1377 # Some bases may turn out to be superfluous, and some heads may be
1379 # Some bases may turn out to be superfluous, and some heads may be
1378 # too. nodesbetween will return the minimal set of bases and heads
1380 # too. nodesbetween will return the minimal set of bases and heads
1379 # necessary to re-create the changegroup.
1381 # necessary to re-create the changegroup.
1380
1382
1381 # Known heads are the list of heads that it is assumed the recipient
1383 # Known heads are the list of heads that it is assumed the recipient
1382 # of this changegroup will know about.
1384 # of this changegroup will know about.
1383 knownheads = {}
1385 knownheads = {}
1384 # We assume that all parents of bases are known heads.
1386 # We assume that all parents of bases are known heads.
1385 for n in bases:
1387 for n in bases:
1386 for p in cl.parents(n):
1388 for p in cl.parents(n):
1387 if p != nullid:
1389 if p != nullid:
1388 knownheads[p] = 1
1390 knownheads[p] = 1
1389 knownheads = knownheads.keys()
1391 knownheads = knownheads.keys()
1390 if knownheads:
1392 if knownheads:
1391 # Now that we know what heads are known, we can compute which
1393 # Now that we know what heads are known, we can compute which
1392 # changesets are known. The recipient must know about all
1394 # changesets are known. The recipient must know about all
1393 # changesets required to reach the known heads from the null
1395 # changesets required to reach the known heads from the null
1394 # changeset.
1396 # changeset.
1395 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1397 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1396 junk = None
1398 junk = None
1397 # Transform the list into an ersatz set.
1399 # Transform the list into an ersatz set.
1398 has_cl_set = dict.fromkeys(has_cl_set)
1400 has_cl_set = dict.fromkeys(has_cl_set)
1399 else:
1401 else:
1400 # If there were no known heads, the recipient cannot be assumed to
1402 # If there were no known heads, the recipient cannot be assumed to
1401 # know about any changesets.
1403 # know about any changesets.
1402 has_cl_set = {}
1404 has_cl_set = {}
1403
1405
1404 # Make it easy to refer to self.manifest
1406 # Make it easy to refer to self.manifest
1405 mnfst = self.manifest
1407 mnfst = self.manifest
1406 # We don't know which manifests are missing yet
1408 # We don't know which manifests are missing yet
1407 msng_mnfst_set = {}
1409 msng_mnfst_set = {}
1408 # Nor do we know which filenodes are missing.
1410 # Nor do we know which filenodes are missing.
1409 msng_filenode_set = {}
1411 msng_filenode_set = {}
1410
1412
1411 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1413 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1412 junk = None
1414 junk = None
1413
1415
1414 # A changeset always belongs to itself, so the changenode lookup
1416 # A changeset always belongs to itself, so the changenode lookup
1415 # function for a changenode is identity.
1417 # function for a changenode is identity.
1416 def identity(x):
1418 def identity(x):
1417 return x
1419 return x
1418
1420
1419 # A function generating function. Sets up an environment for the
1421 # A function generating function. Sets up an environment for the
1420 # inner function.
1422 # inner function.
1421 def cmp_by_rev_func(revlog):
1423 def cmp_by_rev_func(revlog):
1422 # Compare two nodes by their revision number in the environment's
1424 # Compare two nodes by their revision number in the environment's
1423 # revision history. Since the revision number both represents the
1425 # revision history. Since the revision number both represents the
1424 # most efficient order to read the nodes in, and represents a
1426 # most efficient order to read the nodes in, and represents a
1425 # topological sorting of the nodes, this function is often useful.
1427 # topological sorting of the nodes, this function is often useful.
1426 def cmp_by_rev(a, b):
1428 def cmp_by_rev(a, b):
1427 return cmp(revlog.rev(a), revlog.rev(b))
1429 return cmp(revlog.rev(a), revlog.rev(b))
1428 return cmp_by_rev
1430 return cmp_by_rev
1429
1431
1430 # If we determine that a particular file or manifest node must be a
1432 # If we determine that a particular file or manifest node must be a
1431 # node that the recipient of the changegroup will already have, we can
1433 # node that the recipient of the changegroup will already have, we can
1432 # also assume the recipient will have all the parents. This function
1434 # also assume the recipient will have all the parents. This function
1433 # prunes them from the set of missing nodes.
1435 # prunes them from the set of missing nodes.
1434 def prune_parents(revlog, hasset, msngset):
1436 def prune_parents(revlog, hasset, msngset):
1435 haslst = hasset.keys()
1437 haslst = hasset.keys()
1436 haslst.sort(cmp_by_rev_func(revlog))
1438 haslst.sort(cmp_by_rev_func(revlog))
1437 for node in haslst:
1439 for node in haslst:
1438 parentlst = [p for p in revlog.parents(node) if p != nullid]
1440 parentlst = [p for p in revlog.parents(node) if p != nullid]
1439 while parentlst:
1441 while parentlst:
1440 n = parentlst.pop()
1442 n = parentlst.pop()
1441 if n not in hasset:
1443 if n not in hasset:
1442 hasset[n] = 1
1444 hasset[n] = 1
1443 p = [p for p in revlog.parents(n) if p != nullid]
1445 p = [p for p in revlog.parents(n) if p != nullid]
1444 parentlst.extend(p)
1446 parentlst.extend(p)
1445 for n in hasset:
1447 for n in hasset:
1446 msngset.pop(n, None)
1448 msngset.pop(n, None)
1447
1449
1448 # This is a function generating function used to set up an environment
1450 # This is a function generating function used to set up an environment
1449 # for the inner function to execute in.
1451 # for the inner function to execute in.
1450 def manifest_and_file_collector(changedfileset):
1452 def manifest_and_file_collector(changedfileset):
1451 # This is an information gathering function that gathers
1453 # This is an information gathering function that gathers
1452 # information from each changeset node that goes out as part of
1454 # information from each changeset node that goes out as part of
1453 # the changegroup. The information gathered is a list of which
1455 # the changegroup. The information gathered is a list of which
1454 # manifest nodes are potentially required (the recipient may
1456 # manifest nodes are potentially required (the recipient may
1455 # already have them) and total list of all files which were
1457 # already have them) and total list of all files which were
1456 # changed in any changeset in the changegroup.
1458 # changed in any changeset in the changegroup.
1457 #
1459 #
1458 # We also remember the first changenode we saw any manifest
1460 # We also remember the first changenode we saw any manifest
1459 # referenced by so we can later determine which changenode 'owns'
1461 # referenced by so we can later determine which changenode 'owns'
1460 # the manifest.
1462 # the manifest.
1461 def collect_manifests_and_files(clnode):
1463 def collect_manifests_and_files(clnode):
1462 c = cl.read(clnode)
1464 c = cl.read(clnode)
1463 for f in c[3]:
1465 for f in c[3]:
1464 # This is to make sure we only have one instance of each
1466 # This is to make sure we only have one instance of each
1465 # filename string for each filename.
1467 # filename string for each filename.
1466 changedfileset.setdefault(f, f)
1468 changedfileset.setdefault(f, f)
1467 msng_mnfst_set.setdefault(c[0], clnode)
1469 msng_mnfst_set.setdefault(c[0], clnode)
1468 return collect_manifests_and_files
1470 return collect_manifests_and_files
1469
1471
1470 # Figure out which manifest nodes (of the ones we think might be part
1472 # Figure out which manifest nodes (of the ones we think might be part
1471 # of the changegroup) the recipient must know about and remove them
1473 # of the changegroup) the recipient must know about and remove them
1472 # from the changegroup.
1474 # from the changegroup.
1473 def prune_manifests():
1475 def prune_manifests():
1474 has_mnfst_set = {}
1476 has_mnfst_set = {}
1475 for n in msng_mnfst_set:
1477 for n in msng_mnfst_set:
1476 # If a 'missing' manifest thinks it belongs to a changenode
1478 # If a 'missing' manifest thinks it belongs to a changenode
1477 # the recipient is assumed to have, obviously the recipient
1479 # the recipient is assumed to have, obviously the recipient
1478 # must have that manifest.
1480 # must have that manifest.
1479 linknode = cl.node(mnfst.linkrev(n))
1481 linknode = cl.node(mnfst.linkrev(n))
1480 if linknode in has_cl_set:
1482 if linknode in has_cl_set:
1481 has_mnfst_set[n] = 1
1483 has_mnfst_set[n] = 1
1482 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1484 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1483
1485
1484 # Use the information collected in collect_manifests_and_files to say
1486 # Use the information collected in collect_manifests_and_files to say
1485 # which changenode any manifestnode belongs to.
1487 # which changenode any manifestnode belongs to.
1486 def lookup_manifest_link(mnfstnode):
1488 def lookup_manifest_link(mnfstnode):
1487 return msng_mnfst_set[mnfstnode]
1489 return msng_mnfst_set[mnfstnode]
1488
1490
1489 # A function generating function that sets up the initial environment
1491 # A function generating function that sets up the initial environment
1490 # the inner function.
1492 # the inner function.
1491 def filenode_collector(changedfiles):
1493 def filenode_collector(changedfiles):
1492 next_rev = [0]
1494 next_rev = [0]
1493 # This gathers information from each manifestnode included in the
1495 # This gathers information from each manifestnode included in the
1494 # changegroup about which filenodes the manifest node references
1496 # changegroup about which filenodes the manifest node references
1495 # so we can include those in the changegroup too.
1497 # so we can include those in the changegroup too.
1496 #
1498 #
1497 # It also remembers which changenode each filenode belongs to. It
1499 # It also remembers which changenode each filenode belongs to. It
1498 # does this by assuming the a filenode belongs to the changenode
1500 # does this by assuming the a filenode belongs to the changenode
1499 # the first manifest that references it belongs to.
1501 # the first manifest that references it belongs to.
1500 def collect_msng_filenodes(mnfstnode):
1502 def collect_msng_filenodes(mnfstnode):
1501 r = mnfst.rev(mnfstnode)
1503 r = mnfst.rev(mnfstnode)
1502 if r == next_rev[0]:
1504 if r == next_rev[0]:
1503 # If the last rev we looked at was the one just previous,
1505 # If the last rev we looked at was the one just previous,
1504 # we only need to see a diff.
1506 # we only need to see a diff.
1505 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1507 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1506 # For each line in the delta
1508 # For each line in the delta
1507 for dline in delta.splitlines():
1509 for dline in delta.splitlines():
1508 # get the filename and filenode for that line
1510 # get the filename and filenode for that line
1509 f, fnode = dline.split('\0')
1511 f, fnode = dline.split('\0')
1510 fnode = bin(fnode[:40])
1512 fnode = bin(fnode[:40])
1511 f = changedfiles.get(f, None)
1513 f = changedfiles.get(f, None)
1512 # And if the file is in the list of files we care
1514 # And if the file is in the list of files we care
1513 # about.
1515 # about.
1514 if f is not None:
1516 if f is not None:
1515 # Get the changenode this manifest belongs to
1517 # Get the changenode this manifest belongs to
1516 clnode = msng_mnfst_set[mnfstnode]
1518 clnode = msng_mnfst_set[mnfstnode]
1517 # Create the set of filenodes for the file if
1519 # Create the set of filenodes for the file if
1518 # there isn't one already.
1520 # there isn't one already.
1519 ndset = msng_filenode_set.setdefault(f, {})
1521 ndset = msng_filenode_set.setdefault(f, {})
1520 # And set the filenode's changelog node to the
1522 # And set the filenode's changelog node to the
1521 # manifest's if it hasn't been set already.
1523 # manifest's if it hasn't been set already.
1522 ndset.setdefault(fnode, clnode)
1524 ndset.setdefault(fnode, clnode)
1523 else:
1525 else:
1524 # Otherwise we need a full manifest.
1526 # Otherwise we need a full manifest.
1525 m = mnfst.read(mnfstnode)
1527 m = mnfst.read(mnfstnode)
1526 # For every file in we care about.
1528 # For every file in we care about.
1527 for f in changedfiles:
1529 for f in changedfiles:
1528 fnode = m.get(f, None)
1530 fnode = m.get(f, None)
1529 # If it's in the manifest
1531 # If it's in the manifest
1530 if fnode is not None:
1532 if fnode is not None:
1531 # See comments above.
1533 # See comments above.
1532 clnode = msng_mnfst_set[mnfstnode]
1534 clnode = msng_mnfst_set[mnfstnode]
1533 ndset = msng_filenode_set.setdefault(f, {})
1535 ndset = msng_filenode_set.setdefault(f, {})
1534 ndset.setdefault(fnode, clnode)
1536 ndset.setdefault(fnode, clnode)
1535 # Remember the revision we hope to see next.
1537 # Remember the revision we hope to see next.
1536 next_rev[0] = r + 1
1538 next_rev[0] = r + 1
1537 return collect_msng_filenodes
1539 return collect_msng_filenodes
1538
1540
1539 # We have a list of filenodes we think we need for a file, lets remove
1541 # We have a list of filenodes we think we need for a file, lets remove
1540 # all those we now the recipient must have.
1542 # all those we now the recipient must have.
1541 def prune_filenodes(f, filerevlog):
1543 def prune_filenodes(f, filerevlog):
1542 msngset = msng_filenode_set[f]
1544 msngset = msng_filenode_set[f]
1543 hasset = {}
1545 hasset = {}
1544 # If a 'missing' filenode thinks it belongs to a changenode we
1546 # If a 'missing' filenode thinks it belongs to a changenode we
1545 # assume the recipient must have, then the recipient must have
1547 # assume the recipient must have, then the recipient must have
1546 # that filenode.
1548 # that filenode.
1547 for n in msngset:
1549 for n in msngset:
1548 clnode = cl.node(filerevlog.linkrev(n))
1550 clnode = cl.node(filerevlog.linkrev(n))
1549 if clnode in has_cl_set:
1551 if clnode in has_cl_set:
1550 hasset[n] = 1
1552 hasset[n] = 1
1551 prune_parents(filerevlog, hasset, msngset)
1553 prune_parents(filerevlog, hasset, msngset)
1552
1554
# A function-generator that sets up the context for the inner
# function.
def lookup_filenode_link_func(fname):
    # Bind the per-file "missing filenode -> owning changenode" map
    # once, then hand back a closure that does the lookup.
    filenode_to_clnode = msng_filenode_set[fname]
    def lookup_filenode_link(fnode):
        return filenode_to_clnode[fnode]
    return lookup_filenode_link
1561
1563
# Now that we have all these utility functions to help out and
# logically divide up the task, generate the group.
def gengroup():
    # The set of changed files starts empty; the changelog group's
    # collector callback fills it in as chunks are generated.
    changedfiles = {}
    # Stream the changelog group first.  The collector gathers
    # manifest and file information as a side effect.
    cl_group = cl.group(msng_cl_lst, identity,
                        manifest_and_file_collector(changedfiles))
    for chunk in cl_group:
        yield chunk

    # The callbacks above populated msng_mnfst_set; toss the manifest
    # nodes the recipient already has.
    prune_manifests()
    msng_mnfst_lst = msng_mnfst_set.keys()
    # Manifest nodes go out in revision-number order.
    msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
    # The manifest group's callbacks collect the filenodes we still
    # need to consider sending.
    mnfst_group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                              filenode_collector(changedfiles))
    for chunk in mnfst_group:
        yield chunk

    # The manifest bookkeeping is no longer needed; free the memory.
    msng_mnfst_lst = None
    msng_mnfst_set.clear()

    changedfiles = changedfiles.keys()
    changedfiles.sort()
    # Walk the changed files in sorted name order.
    for fname in changedfiles:
        filerevlog = self.file(fname)
        # Toss out the filenodes that the recipient isn't really
        # missing.
        if msng_filenode_set.has_key(fname):
            prune_filenodes(fname, filerevlog)
            msng_filenode_lst = msng_filenode_set[fname].keys()
        else:
            msng_filenode_lst = []
        # Only emit a group when something is left to send.
        if msng_filenode_lst:
            yield changegroup.genchunk(fname)
            # Filenodes are sent in revision-number order too.
            msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
            # Only a changenode-lookup function is passed in: no more
            # information needs to be collected from filenodes.
            file_group = filerevlog.group(msng_filenode_lst,
                                          lookup_filenode_link_func(fname))
            for chunk in file_group:
                yield chunk
        if msng_filenode_set.has_key(fname):
            # Don't need this anymore; toss it to free memory.
            del msng_filenode_set[fname]
    # Signal that no more groups are left.
    yield changegroup.closechunk()
1622
1624
1623 if msng_cl_lst:
1625 if msng_cl_lst:
1624 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1626 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1625
1627
1626 return util.chunkbuffer(gengroup())
1628 return util.chunkbuffer(gengroup())
1627
1629
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than changegroupsubset, as we can assume the
    recipient already has any changenode we aren't sending them."""

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    nodes = cl.nodesbetween(basenodes, None)[0]
    # Revision numbers of every changeset we are about to send,
    # used to decide which manifest/file revisions go along.
    revset = {}
    for n in nodes:
        revset[cl.rev(n)] = None
    self.changegroupinfo(nodes)

    def identity(x):
        # A changelog node is its own link node.
        return x

    def gennodelst(revlog):
        # Yield the nodes of revlog whose linked changeset is outgoing.
        for rev in xrange(0, revlog.count()):
            node = revlog.node(rev)
            if revlog.linkrev(node) in revset:
                yield node

    def changed_file_collector(changedfileset):
        # As each changeset is streamed, record the files it touches.
        def collect_changed_files(clnode):
            clinfo = cl.read(clnode)
            for fname in clinfo[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Map a node in revlog back to its owning changelog node.
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # First the changelog itself, collecting the set of changed
        # files along the way.
        changedfiles = {}
        collector = changed_file_collector(changedfiles)
        for chnk in cl.group(nodes, identity, collector):
            yield chnk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        # Then the manifest revisions that belong to those changesets.
        mnfst = self.manifest
        for chnk in mnfst.group(gennodelst(mnfst),
                                lookuprevlink_func(mnfst)):
            yield chnk

        # Finally one group per changed file, in sorted name order;
        # files with nothing to send are skipped entirely.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = list(gennodelst(filerevlog))
            if nodeiter:
                yield changegroup.genchunk(fname)
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        # An empty chunk signals the end of the stream.
        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1694
1696
def addchangegroup(self, source, srctype, url):
    """add changegroup to repo.
    returns number of heads modified or added + 1."""

    def csmap(x):
        # Each incoming changeset links to the revision number it will
        # receive, i.e. the current count.
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    def revmap(x):
        # Manifest/file revisions link back to changelog revisions.
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    tr = self.transaction()

    # Write changelog data to temp files so concurrent readers will
    # not see an inconsistent view.
    cl = None
    try:
        cl = appendfile.appendchangelog(self.sopener,
                                        self.changelog.version)

        oldheads = len(cl.heads())

        # Pull off the changeset group.
        self.ui.status(_("adding changesets\n"))
        rev_before = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        rev_after = cl.count() - 1
        changesets = rev_after - rev_before

        # Pull off the manifest group.  No need to check for an empty
        # group here: if the result of the merge of 1 and 2 is the same
        # in 3 and 4, no new manifest will be created and the manifest
        # group will be empty during the pull.
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        self.manifest.addgroup(chunkiter, revmap, tr)

        # Process the per-file groups until the terminating empty chunk.
        self.ui.status(_("adding file changes\n"))
        while True:
            fname = changegroup.getchunk(source)
            if not fname:
                break
            self.ui.debug(_("adding %s revisions\n") % fname)
            flog = self.file(fname)
            count_before = flog.count()
            chunkiter = changegroup.chunkiter(source)
            if flog.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += flog.count() - count_before
            files += 1

        cl.writedata()
    finally:
        # Always discard the appendfile wrapper, even on error.
        if cl:
            cl.cleanup()

    # Make the changelog see real files again.
    self.changelog = changelog.changelog(self.sopener,
                                         self.changelog.version)
    self.changelog.checkinlinesize(tr)

    newheads = len(self.changelog.heads())
    heads = ""
    if oldheads and newheads != oldheads:
        heads = _(" (%+d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, heads))

    if changesets > 0:
        # pretxnchangegroup may still abort the whole transaction.
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(rev_before + 1)),
                  source=srctype, url=url)

    tr.close()

    if changesets > 0:
        self.hook("changegroup",
                  node=hex(self.changelog.node(rev_before + 1)),
                  source=srctype, url=url)

        # One "incoming" hook call per new changeset.
        for i in xrange(rev_before + 1, rev_after + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    return newheads - oldheads + 1
1792
1794
1793
1795
def stream_in(self, remote):
    """Copy raw repository data streamed from *remote* into this repo.

    Protocol (as read from remote.stream_out()): a status line (an
    integer; non-zero means the server refused), a "<files> <bytes>"
    summary line, then for each file a "name\\0size" header followed by
    exactly *size* bytes of data written verbatim via self.sopener.

    Raises util.UnexpectedOutput on a malformed response and util.Abort
    if the server forbids streaming.  Returns len(self.heads()) + 1.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp != 0:
        raise util.Abort(_('operation forbidden by server'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # BUG FIX: the original 'except ValueError, TypeError:' is the
    # Python 2 two-name form: it catches ONLY ValueError and rebinds
    # the exception instance to the name TypeError.  A tuple is needed
    # to actually catch both exception types.
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):  # same fix as above
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    # Guard against a zero-duration transfer (coarse clocks / empty
    # repos) which would raise ZeroDivisionError below.
    if elapsed <= 0:
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.reload()
    return len(self.heads()) + 1
1833
1835
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # BUG FIX: the default was the mutable 'heads=[]'.  Use a None
    # sentinel and normalize to a fresh empty list, preserving the
    # exact value previously passed down to pull().
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
1852
1854
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callable that renames the journal files under *base*
    to their undo names once a transaction completes."""
    def rename_journal():
        util.rename(os.path.join(base, "journal"),
                    os.path.join(base, "undo"))
        util.rename(os.path.join(base, "journal.dirstate"),
                    os.path.join(base, "undo.dirstate"))
    return rename_journal
1861
1863
def instance(ui, path, create):
    """Open (or create) a localrepository at *path*, stripping any
    leading 'file:' scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1864
1866
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now