##// END OF EJS Templates
Corrected the "waiting for lock on repository FOO held by BAR" message: report the actual lock holder via inst.locker (formatted with %r) instead of inst.args[0].
Thomas Arendsen Hein -
r3688:d92dad35 default
parent child Browse files
Show More
@@ -1,1896 +1,1896 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
class localrepository(repo.repository):
    # wire-protocol capabilities this repository advertises to peers:
    # revision lookup by name, and partial-changegroup transfer
    capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        When path is None, walk upward from the current directory until
        a .hg directory is found.  Raises repo.RepoError when no
        repository exists (or, with create, when one already does).
        """
        repo.repository.__init__(self)
        if not path:
            # search for a .hg directory from cwd up to the filesystem root
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(self.join("data"))
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.realpath(path)
        self.origroot = path            # path as given, for messages
        self.ui = ui.ui(parentui=parentui)
        # opener/sopener both target .hg; wopener targets the working dir
        self.opener = util.opener(self.path)
        self.sopener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is perfectly valid
            pass

        # determine the revlog format version and flags from config
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-populated caches; None means "not computed yet"
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
90
90
91 def url(self):
91 def url(self):
92 return 'file:' + self.root
92 return 'file:' + self.root
93
93
    def hook(self, name, throw=False, **args):
        """Run all configured [hooks] entries whose base name is 'name'.

        Extra keyword args are passed to python hooks directly and to
        shell hooks as HG_* environment variables.  Returns the or-ed
        failure status of all hooks; with throw=True a failing hook
        raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run a shell hook; keyword args become HG_* env vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # collect matching hooks ("name" or "name.suffix") and run them
        # in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
174
174
    # characters that may not appear in a tag name (conflict with the
    # "node tag" line format of .hgtags / localtags)
    tag_disallowed = ':\r\n'
176
176
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse characters that would break the tag-file line format
        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tag: append to .hg/localtags, no commit needed
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to clobber uncommitted .hgtags edits (first five status
        # lists: modified, added, removed, deleted, unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            # .hgtags not yet tracked: schedule it for addition
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
217
217
218 def tags(self):
218 def tags(self):
219 '''return a mapping of tag to node'''
219 '''return a mapping of tag to node'''
220 if not self.tagscache:
220 if not self.tagscache:
221 self.tagscache = {}
221 self.tagscache = {}
222
222
223 def parsetag(line, context):
223 def parsetag(line, context):
224 if not line:
224 if not line:
225 return
225 return
226 s = l.split(" ", 1)
226 s = l.split(" ", 1)
227 if len(s) != 2:
227 if len(s) != 2:
228 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 self.ui.warn(_("%s: cannot parse entry\n") % context)
229 return
229 return
230 node, key = s
230 node, key = s
231 key = key.strip()
231 key = key.strip()
232 try:
232 try:
233 bin_n = bin(node)
233 bin_n = bin(node)
234 except TypeError:
234 except TypeError:
235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
236 (context, node))
236 (context, node))
237 return
237 return
238 if bin_n not in self.changelog.nodemap:
238 if bin_n not in self.changelog.nodemap:
239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
240 (context, key))
240 (context, key))
241 return
241 return
242 self.tagscache[key] = bin_n
242 self.tagscache[key] = bin_n
243
243
244 # read the tags file from each head, ending with the tip,
244 # read the tags file from each head, ending with the tip,
245 # and add each tag found to the map, with "newer" ones
245 # and add each tag found to the map, with "newer" ones
246 # taking precedence
246 # taking precedence
247 f = None
247 f = None
248 for rev, node, fnode in self._hgtagsnodes():
248 for rev, node, fnode in self._hgtagsnodes():
249 f = (f and f.filectx(fnode) or
249 f = (f and f.filectx(fnode) or
250 self.filectx('.hgtags', fileid=fnode))
250 self.filectx('.hgtags', fileid=fnode))
251 count = 0
251 count = 0
252 for l in f.data().splitlines():
252 for l in f.data().splitlines():
253 count += 1
253 count += 1
254 parsetag(l, _("%s, line %d") % (str(f), count))
254 parsetag(l, _("%s, line %d") % (str(f), count))
255
255
256 try:
256 try:
257 f = self.opener("localtags")
257 f = self.opener("localtags")
258 count = 0
258 count = 0
259 for l in f:
259 for l in f:
260 count += 1
260 count += 1
261 parsetag(l, _("localtags, line %d") % count)
261 parsetag(l, _("localtags, line %d") % count)
262 except IOError:
262 except IOError:
263 pass
263 pass
264
264
265 self.tagscache['tip'] = self.changelog.tip()
265 self.tagscache['tip'] = self.changelog.tip()
266
266
267 return self.tagscache
267 return self.tagscache
268
268
    def _hgtagsnodes(self):
        """Return [(rev, node, fnode)] for each head carrying a .hgtags
        file, where fnode is the filenode of that head's .hgtags.

        Heads are processed in reverse order of self.heads(); when two
        heads share the same .hgtags filenode only the later occurrence
        is kept.
        """
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except repo.LookupError:
                # head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags content seen earlier: drop the duplicate
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        # squeeze out the None placeholders left by deduplication
        return [item for item in ret if item]
286
286
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                # NOTE(review): bare except also swallows interrupts;
                # presumably only unknown-node lookup errors are meant
                # to be caught here -- confirm before narrowing
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]
298
298
299 def nodetags(self, node):
299 def nodetags(self, node):
300 '''return the tags associated with a node'''
300 '''return the tags associated with a node'''
301 if not self.nodetagscache:
301 if not self.nodetagscache:
302 self.nodetagscache = {}
302 self.nodetagscache = {}
303 for t, n in self.tags().items():
303 for t, n in self.tags().items():
304 self.nodetagscache.setdefault(n, []).append(t)
304 self.nodetagscache.setdefault(n, []).append(t)
305 return self.nodetagscache.get(node, [])
305 return self.nodetagscache.get(node, [])
306
306
    def branchtags(self):
        """Return a map of branch name -> tip-most node of that branch,
        computed lazily and persisted in .hg/branches.cache."""
        if self.branchcache != None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx

        partial, last, lrev = self._readbranchcache()

        # bring the cache up to date with any revisions added since it
        # was written, then persist the refreshed version
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        self.branchcache = partial
        return self.branchcache
322
322
    def _readbranchcache(self):
        """Read .hg/branches.cache.

        Returns (partial, last, lrev): the branch map read so far, and
        the tip node/revision the cache was valid for.  On a missing
        file or failed sanity check, returns ({}, nullid, nullrev) so
        the caller rescans from scratch.
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line records the tip the cache was written against
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if (lrev < self.changelog.count() and
                self.changelog.node(lrev) == last): # sanity check
                for l in lines:
                    if not l: continue
                    node, label = l.rstrip().split(" ", 1)
                    partial[label] = bin(node)
            else: # invalidate the cache
                last, lrev = nullid, nullrev
        except IOError:
            last, lrev = nullid, nullrev
        return partial, last, lrev
342
342
    def _writebranchcache(self, branches, tip, tiprev):
        """Persist the branch map to .hg/branches.cache: one header line
        '<tiphex> <tiprev>' followed by one '<nodehex> <label>' line per
        branch.  Best-effort: write failures (e.g. read-only repository)
        are silently ignored."""
        try:
            f = self.opener("branches.cache", "w")
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass
351
351
352 def _updatebranchcache(self, partial, start, end):
352 def _updatebranchcache(self, partial, start, end):
353 for r in xrange(start, end):
353 for r in xrange(start, end):
354 c = self.changectx(r)
354 c = self.changectx(r)
355 b = c.branch()
355 b = c.branch()
356 if b:
356 if b:
357 partial[b] = c.node()
357 partial[b] = c.node()
358
358
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: '.' (first dirstate parent), exact changelog
        match, tag name, branch name, then unambiguous node-id prefix.
        Raises repo.RepoError when nothing matches.
        """
        if key == '.':
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
375
375
376 def dev(self):
376 def dev(self):
377 return os.lstat(self.path).st_dev
377 return os.lstat(self.path).st_dev
378
378
379 def local(self):
379 def local(self):
380 return True
380 return True
381
381
382 def join(self, f):
382 def join(self, f):
383 return os.path.join(self.path, f)
383 return os.path.join(self.path, f)
384
384
385 def sjoin(self, f):
385 def sjoin(self, f):
386 return os.path.join(self.path, f)
386 return os.path.join(self.path, f)
387
387
388 def wjoin(self, f):
388 def wjoin(self, f):
389 return os.path.join(self.root, f)
389 return os.path.join(self.root, f)
390
390
391 def file(self, f):
391 def file(self, f):
392 if f[0] == '/':
392 if f[0] == '/':
393 f = f[1:]
393 f = f[1:]
394 return filelog.filelog(self.sopener, f, self.revlogversion)
394 return filelog.filelog(self.sopener, f, self.revlogversion)
395
395
396 def changectx(self, changeid=None):
396 def changectx(self, changeid=None):
397 return context.changectx(self, changeid)
397 return context.changectx(self, changeid)
398
398
399 def workingctx(self):
399 def workingctx(self):
400 return context.workingctx(self)
400 return context.workingctx(self)
401
401
402 def parents(self, changeid=None):
402 def parents(self, changeid=None):
403 '''
403 '''
404 get list of changectxs for parents of changeid or working directory
404 get list of changectxs for parents of changeid or working directory
405 '''
405 '''
406 if changeid is None:
406 if changeid is None:
407 pl = self.dirstate.parents()
407 pl = self.dirstate.parents()
408 else:
408 else:
409 n = self.changelog.lookup(changeid)
409 n = self.changelog.lookup(changeid)
410 pl = self.changelog.parents(n)
410 pl = self.changelog.parents(n)
411 if pl[1] == nullid:
411 if pl[1] == nullid:
412 return [self.changectx(pl[0])]
412 return [self.changectx(pl[0])]
413 return [self.changectx(pl[0]), self.changectx(pl[1])]
413 return [self.changectx(pl[0]), self.changectx(pl[1])]
414
414
415 def filectx(self, path, changeid=None, fileid=None):
415 def filectx(self, path, changeid=None, fileid=None):
416 """changeid can be a changeset revision, node, or tag.
416 """changeid can be a changeset revision, node, or tag.
417 fileid can be a file revision or node."""
417 fileid can be a file revision or node."""
418 return context.filectx(self, path, changeid, fileid)
418 return context.filectx(self, path, changeid, fileid)
419
419
420 def getcwd(self):
420 def getcwd(self):
421 return self.dirstate.getcwd()
421 return self.dirstate.getcwd()
422
422
423 def wfile(self, f, mode='r'):
423 def wfile(self, f, mode='r'):
424 return self.wopener(f, mode)
424 return self.wopener(f, mode)
425
425
    def wread(self, filename):
        """Read filename from the working directory, piping the data
        through the first matching [encode] filter command, if any."""
        if self.encodepats == None:
            # lazily build the (matcher, command) table from [encode]
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        # only the first matching filter is applied
        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
443
443
    def wwrite(self, filename, data, fd=None):
        """Write data to filename in the working directory, piping it
        through the first matching [decode] filter command, if any.
        When fd is given, write to that open file object instead of
        opening filename."""
        if self.decodepats == None:
            # lazily build the (matcher, command) table from [decode]
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        # only the first matching filter is applied
        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
461
461
    def transaction(self):
        """Return a transaction on the store.

        If a transaction is already running, returns a nested handle on
        it.  Otherwise snapshots the dirstate (for rollback) and opens a
        fresh journaled transaction.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # nest within the already-running transaction
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # a brand-new repository has no dirstate yet
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
479
479
    def recover(self):
        """Undo an interrupted transaction by replaying the journal.

        Returns True if a journal was found and rolled back, False
        otherwise.  Takes the store lock for the duration.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # store contents changed: re-read changelog/manifest
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
490
490
    def rollback(self, wlock=None):
        """Undo the last committed transaction using the 'undo' journal,
        restoring the saved dirstate as well.  Takes the working-dir
        lock (unless one is passed in) and the store lock."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # both store and working-dir state changed: refresh them
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
503
503
504 def wreload(self):
504 def wreload(self):
505 self.dirstate.read()
505 self.dirstate.read()
506
506
507 def reload(self):
507 def reload(self):
508 self.changelog.load()
508 self.changelog.load()
509 self.manifest.load()
509 self.manifest.load()
510 self.tagscache = None
510 self.tagscache = None
511 self.nodetagscache = None
511 self.nodetagscache = None
512
512
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file 'lockname'.

        First tries a non-blocking acquire; if the lock is held and
        wait is true, warns who holds it and retries with the
        configured ui.timeout (default 600 seconds), otherwise
        re-raises lock.LockHeld.  releasefn/acquirefn are callbacks run
        on release/acquisition; desc is a human-readable name used in
        messages.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            # report the actual holder recorded in the lock file
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
528
528
529 def lock(self, wait=1):
529 def lock(self, wait=1):
530 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
530 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
531 desc=_('repository %s') % self.origroot)
531 desc=_('repository %s') % self.origroot)
532
532
533 def wlock(self, wait=1):
533 def wlock(self, wait=1):
534 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
534 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
535 self.wreload,
535 self.wreload,
536 desc=_('working directory of %s') % self.origroot)
536 desc=_('working directory of %s') % self.origroot)
537
537
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filenode (or the existing one when the file is
        unchanged from its parent).  fn is appended to changelist when a
        new revision is actually added.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # parent filenodes from the two parent manifests (nullid if absent)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file was copied/renamed from cp: record the source and
            # its revision, and rewire the filelog parents accordingly
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
577
577
578 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
578 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
579 if p1 is None:
579 if p1 is None:
580 p1, p2 = self.dirstate.parents()
580 p1, p2 = self.dirstate.parents()
581 return self.commit(files=files, text=text, user=user, date=date,
581 return self.commit(files=files, text=text, user=user, date=date,
582 p1=p1, p2=p2, wlock=wlock)
582 p1=p1, p2=p2, wlock=wlock)
583
583
584 def commit(self, files=None, text="", user=None, date=None,
584 def commit(self, files=None, text="", user=None, date=None,
585 match=util.always, force=False, lock=None, wlock=None,
585 match=util.always, force=False, lock=None, wlock=None,
586 force_editor=False, p1=None, p2=None, extra={}):
586 force_editor=False, p1=None, p2=None, extra={}):
587
587
588 commit = []
588 commit = []
589 remove = []
589 remove = []
590 changed = []
590 changed = []
591 use_dirstate = (p1 is None) # not rawcommit
591 use_dirstate = (p1 is None) # not rawcommit
592 extra = extra.copy()
592 extra = extra.copy()
593
593
594 if use_dirstate:
594 if use_dirstate:
595 if files:
595 if files:
596 for f in files:
596 for f in files:
597 s = self.dirstate.state(f)
597 s = self.dirstate.state(f)
598 if s in 'nmai':
598 if s in 'nmai':
599 commit.append(f)
599 commit.append(f)
600 elif s == 'r':
600 elif s == 'r':
601 remove.append(f)
601 remove.append(f)
602 else:
602 else:
603 self.ui.warn(_("%s not tracked!\n") % f)
603 self.ui.warn(_("%s not tracked!\n") % f)
604 else:
604 else:
605 changes = self.status(match=match)[:5]
605 changes = self.status(match=match)[:5]
606 modified, added, removed, deleted, unknown = changes
606 modified, added, removed, deleted, unknown = changes
607 commit = modified + added
607 commit = modified + added
608 remove = removed
608 remove = removed
609 else:
609 else:
610 commit = files
610 commit = files
611
611
612 if use_dirstate:
612 if use_dirstate:
613 p1, p2 = self.dirstate.parents()
613 p1, p2 = self.dirstate.parents()
614 update_dirstate = True
614 update_dirstate = True
615 else:
615 else:
616 p1, p2 = p1, p2 or nullid
616 p1, p2 = p1, p2 or nullid
617 update_dirstate = (self.dirstate.parents()[0] == p1)
617 update_dirstate = (self.dirstate.parents()[0] == p1)
618
618
619 c1 = self.changelog.read(p1)
619 c1 = self.changelog.read(p1)
620 c2 = self.changelog.read(p2)
620 c2 = self.changelog.read(p2)
621 m1 = self.manifest.read(c1[0]).copy()
621 m1 = self.manifest.read(c1[0]).copy()
622 m2 = self.manifest.read(c2[0])
622 m2 = self.manifest.read(c2[0])
623
623
624 if use_dirstate:
624 if use_dirstate:
625 branchname = self.workingctx().branch()
625 branchname = self.workingctx().branch()
626 else:
626 else:
627 branchname = ""
627 branchname = ""
628
628
629 if use_dirstate:
629 if use_dirstate:
630 oldname = c1[5].get("branch", "")
630 oldname = c1[5].get("branch", "")
631 if not commit and not remove and not force and p2 == nullid and \
631 if not commit and not remove and not force and p2 == nullid and \
632 branchname == oldname:
632 branchname == oldname:
633 self.ui.status(_("nothing changed\n"))
633 self.ui.status(_("nothing changed\n"))
634 return None
634 return None
635
635
636 xp1 = hex(p1)
636 xp1 = hex(p1)
637 if p2 == nullid: xp2 = ''
637 if p2 == nullid: xp2 = ''
638 else: xp2 = hex(p2)
638 else: xp2 = hex(p2)
639
639
640 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
640 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
641
641
642 if not wlock:
642 if not wlock:
643 wlock = self.wlock()
643 wlock = self.wlock()
644 if not lock:
644 if not lock:
645 lock = self.lock()
645 lock = self.lock()
646 tr = self.transaction()
646 tr = self.transaction()
647
647
648 # check in files
648 # check in files
649 new = {}
649 new = {}
650 linkrev = self.changelog.count()
650 linkrev = self.changelog.count()
651 commit.sort()
651 commit.sort()
652 for f in commit:
652 for f in commit:
653 self.ui.note(f + "\n")
653 self.ui.note(f + "\n")
654 try:
654 try:
655 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
655 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
656 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
656 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
657 except IOError:
657 except IOError:
658 if use_dirstate:
658 if use_dirstate:
659 self.ui.warn(_("trouble committing %s!\n") % f)
659 self.ui.warn(_("trouble committing %s!\n") % f)
660 raise
660 raise
661 else:
661 else:
662 remove.append(f)
662 remove.append(f)
663
663
664 # update manifest
664 # update manifest
665 m1.update(new)
665 m1.update(new)
666 remove.sort()
666 remove.sort()
667
667
668 for f in remove:
668 for f in remove:
669 if f in m1:
669 if f in m1:
670 del m1[f]
670 del m1[f]
671 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
671 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
672
672
673 # add changeset
673 # add changeset
674 new = new.keys()
674 new = new.keys()
675 new.sort()
675 new.sort()
676
676
677 user = user or self.ui.username()
677 user = user or self.ui.username()
678 if not text or force_editor:
678 if not text or force_editor:
679 edittext = []
679 edittext = []
680 if text:
680 if text:
681 edittext.append(text)
681 edittext.append(text)
682 edittext.append("")
682 edittext.append("")
683 if p2 != nullid:
683 if p2 != nullid:
684 edittext.append("HG: branch merge")
684 edittext.append("HG: branch merge")
685 edittext.extend(["HG: changed %s" % f for f in changed])
685 edittext.extend(["HG: changed %s" % f for f in changed])
686 edittext.extend(["HG: removed %s" % f for f in remove])
686 edittext.extend(["HG: removed %s" % f for f in remove])
687 if not changed and not remove:
687 if not changed and not remove:
688 edittext.append("HG: no files changed")
688 edittext.append("HG: no files changed")
689 edittext.append("")
689 edittext.append("")
690 # run editor in the repository root
690 # run editor in the repository root
691 olddir = os.getcwd()
691 olddir = os.getcwd()
692 os.chdir(self.root)
692 os.chdir(self.root)
693 text = self.ui.edit("\n".join(edittext), user)
693 text = self.ui.edit("\n".join(edittext), user)
694 os.chdir(olddir)
694 os.chdir(olddir)
695
695
696 lines = [line.rstrip() for line in text.rstrip().splitlines()]
696 lines = [line.rstrip() for line in text.rstrip().splitlines()]
697 while lines and not lines[0]:
697 while lines and not lines[0]:
698 del lines[0]
698 del lines[0]
699 if not lines:
699 if not lines:
700 return None
700 return None
701 text = '\n'.join(lines)
701 text = '\n'.join(lines)
702 if branchname:
702 if branchname:
703 extra["branch"] = branchname
703 extra["branch"] = branchname
704 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
704 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
705 user, date, extra)
705 user, date, extra)
706 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
706 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
707 parent2=xp2)
707 parent2=xp2)
708 tr.close()
708 tr.close()
709
709
710 if use_dirstate or update_dirstate:
710 if use_dirstate or update_dirstate:
711 self.dirstate.setparents(n)
711 self.dirstate.setparents(n)
712 if use_dirstate:
712 if use_dirstate:
713 self.dirstate.update(new, "n")
713 self.dirstate.update(new, "n")
714 self.dirstate.forget(remove)
714 self.dirstate.forget(remove)
715
715
716 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
716 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
717 return n
717 return n
718
718
719 def walk(self, node=None, files=[], match=util.always, badmatch=None):
719 def walk(self, node=None, files=[], match=util.always, badmatch=None):
720 '''
720 '''
721 walk recursively through the directory tree or a given
721 walk recursively through the directory tree or a given
722 changeset, finding all files matched by the match
722 changeset, finding all files matched by the match
723 function
723 function
724
724
725 results are yielded in a tuple (src, filename), where src
725 results are yielded in a tuple (src, filename), where src
726 is one of:
726 is one of:
727 'f' the file was found in the directory tree
727 'f' the file was found in the directory tree
728 'm' the file was only in the dirstate and not in the tree
728 'm' the file was only in the dirstate and not in the tree
729 'b' file was not found and matched badmatch
729 'b' file was not found and matched badmatch
730 '''
730 '''
731
731
732 if node:
732 if node:
733 fdict = dict.fromkeys(files)
733 fdict = dict.fromkeys(files)
734 for fn in self.manifest.read(self.changelog.read(node)[0]):
734 for fn in self.manifest.read(self.changelog.read(node)[0]):
735 for ffn in fdict:
735 for ffn in fdict:
736 # match if the file is the exact name or a directory
736 # match if the file is the exact name or a directory
737 if ffn == fn or fn.startswith("%s/" % ffn):
737 if ffn == fn or fn.startswith("%s/" % ffn):
738 del fdict[ffn]
738 del fdict[ffn]
739 break
739 break
740 if match(fn):
740 if match(fn):
741 yield 'm', fn
741 yield 'm', fn
742 for fn in fdict:
742 for fn in fdict:
743 if badmatch and badmatch(fn):
743 if badmatch and badmatch(fn):
744 if match(fn):
744 if match(fn):
745 yield 'b', fn
745 yield 'b', fn
746 else:
746 else:
747 self.ui.warn(_('%s: No such file in rev %s\n') % (
747 self.ui.warn(_('%s: No such file in rev %s\n') % (
748 util.pathto(self.getcwd(), fn), short(node)))
748 util.pathto(self.getcwd(), fn), short(node)))
749 else:
749 else:
750 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
750 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
751 yield src, fn
751 yield src, fn
752
752
753 def status(self, node1=None, node2=None, files=[], match=util.always,
753 def status(self, node1=None, node2=None, files=[], match=util.always,
754 wlock=None, list_ignored=False, list_clean=False):
754 wlock=None, list_ignored=False, list_clean=False):
755 """return status of files between two nodes or node and working directory
755 """return status of files between two nodes or node and working directory
756
756
757 If node1 is None, use the first dirstate parent instead.
757 If node1 is None, use the first dirstate parent instead.
758 If node2 is None, compare node1 with working directory.
758 If node2 is None, compare node1 with working directory.
759 """
759 """
760
760
761 def fcmp(fn, mf):
761 def fcmp(fn, mf):
762 t1 = self.wread(fn)
762 t1 = self.wread(fn)
763 return self.file(fn).cmp(mf.get(fn, nullid), t1)
763 return self.file(fn).cmp(mf.get(fn, nullid), t1)
764
764
765 def mfmatches(node):
765 def mfmatches(node):
766 change = self.changelog.read(node)
766 change = self.changelog.read(node)
767 mf = self.manifest.read(change[0]).copy()
767 mf = self.manifest.read(change[0]).copy()
768 for fn in mf.keys():
768 for fn in mf.keys():
769 if not match(fn):
769 if not match(fn):
770 del mf[fn]
770 del mf[fn]
771 return mf
771 return mf
772
772
773 modified, added, removed, deleted, unknown = [], [], [], [], []
773 modified, added, removed, deleted, unknown = [], [], [], [], []
774 ignored, clean = [], []
774 ignored, clean = [], []
775
775
776 compareworking = False
776 compareworking = False
777 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
777 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
778 compareworking = True
778 compareworking = True
779
779
780 if not compareworking:
780 if not compareworking:
781 # read the manifest from node1 before the manifest from node2,
781 # read the manifest from node1 before the manifest from node2,
782 # so that we'll hit the manifest cache if we're going through
782 # so that we'll hit the manifest cache if we're going through
783 # all the revisions in parent->child order.
783 # all the revisions in parent->child order.
784 mf1 = mfmatches(node1)
784 mf1 = mfmatches(node1)
785
785
786 # are we comparing the working directory?
786 # are we comparing the working directory?
787 if not node2:
787 if not node2:
788 if not wlock:
788 if not wlock:
789 try:
789 try:
790 wlock = self.wlock(wait=0)
790 wlock = self.wlock(wait=0)
791 except lock.LockException:
791 except lock.LockException:
792 wlock = None
792 wlock = None
793 (lookup, modified, added, removed, deleted, unknown,
793 (lookup, modified, added, removed, deleted, unknown,
794 ignored, clean) = self.dirstate.status(files, match,
794 ignored, clean) = self.dirstate.status(files, match,
795 list_ignored, list_clean)
795 list_ignored, list_clean)
796
796
797 # are we comparing working dir against its parent?
797 # are we comparing working dir against its parent?
798 if compareworking:
798 if compareworking:
799 if lookup:
799 if lookup:
800 # do a full compare of any files that might have changed
800 # do a full compare of any files that might have changed
801 mf2 = mfmatches(self.dirstate.parents()[0])
801 mf2 = mfmatches(self.dirstate.parents()[0])
802 for f in lookup:
802 for f in lookup:
803 if fcmp(f, mf2):
803 if fcmp(f, mf2):
804 modified.append(f)
804 modified.append(f)
805 else:
805 else:
806 clean.append(f)
806 clean.append(f)
807 if wlock is not None:
807 if wlock is not None:
808 self.dirstate.update([f], "n")
808 self.dirstate.update([f], "n")
809 else:
809 else:
810 # we are comparing working dir against non-parent
810 # we are comparing working dir against non-parent
811 # generate a pseudo-manifest for the working dir
811 # generate a pseudo-manifest for the working dir
812 # XXX: create it in dirstate.py ?
812 # XXX: create it in dirstate.py ?
813 mf2 = mfmatches(self.dirstate.parents()[0])
813 mf2 = mfmatches(self.dirstate.parents()[0])
814 for f in lookup + modified + added:
814 for f in lookup + modified + added:
815 mf2[f] = ""
815 mf2[f] = ""
816 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
816 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
817 for f in removed:
817 for f in removed:
818 if f in mf2:
818 if f in mf2:
819 del mf2[f]
819 del mf2[f]
820 else:
820 else:
821 # we are comparing two revisions
821 # we are comparing two revisions
822 mf2 = mfmatches(node2)
822 mf2 = mfmatches(node2)
823
823
824 if not compareworking:
824 if not compareworking:
825 # flush lists from dirstate before comparing manifests
825 # flush lists from dirstate before comparing manifests
826 modified, added, clean = [], [], []
826 modified, added, clean = [], [], []
827
827
828 # make sure to sort the files so we talk to the disk in a
828 # make sure to sort the files so we talk to the disk in a
829 # reasonable order
829 # reasonable order
830 mf2keys = mf2.keys()
830 mf2keys = mf2.keys()
831 mf2keys.sort()
831 mf2keys.sort()
832 for fn in mf2keys:
832 for fn in mf2keys:
833 if mf1.has_key(fn):
833 if mf1.has_key(fn):
834 if mf1.flags(fn) != mf2.flags(fn) or \
834 if mf1.flags(fn) != mf2.flags(fn) or \
835 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
835 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
836 modified.append(fn)
836 modified.append(fn)
837 elif list_clean:
837 elif list_clean:
838 clean.append(fn)
838 clean.append(fn)
839 del mf1[fn]
839 del mf1[fn]
840 else:
840 else:
841 added.append(fn)
841 added.append(fn)
842
842
843 removed = mf1.keys()
843 removed = mf1.keys()
844
844
845 # sort and return results:
845 # sort and return results:
846 for l in modified, added, removed, deleted, unknown, ignored, clean:
846 for l in modified, added, removed, deleted, unknown, ignored, clean:
847 l.sort()
847 l.sort()
848 return (modified, added, removed, deleted, unknown, ignored, clean)
848 return (modified, added, removed, deleted, unknown, ignored, clean)
849
849
850 def add(self, list, wlock=None):
850 def add(self, list, wlock=None):
851 if not wlock:
851 if not wlock:
852 wlock = self.wlock()
852 wlock = self.wlock()
853 for f in list:
853 for f in list:
854 p = self.wjoin(f)
854 p = self.wjoin(f)
855 if not os.path.exists(p):
855 if not os.path.exists(p):
856 self.ui.warn(_("%s does not exist!\n") % f)
856 self.ui.warn(_("%s does not exist!\n") % f)
857 elif not os.path.isfile(p):
857 elif not os.path.isfile(p):
858 self.ui.warn(_("%s not added: only files supported currently\n")
858 self.ui.warn(_("%s not added: only files supported currently\n")
859 % f)
859 % f)
860 elif self.dirstate.state(f) in 'an':
860 elif self.dirstate.state(f) in 'an':
861 self.ui.warn(_("%s already tracked!\n") % f)
861 self.ui.warn(_("%s already tracked!\n") % f)
862 else:
862 else:
863 self.dirstate.update([f], "a")
863 self.dirstate.update([f], "a")
864
864
865 def forget(self, list, wlock=None):
865 def forget(self, list, wlock=None):
866 if not wlock:
866 if not wlock:
867 wlock = self.wlock()
867 wlock = self.wlock()
868 for f in list:
868 for f in list:
869 if self.dirstate.state(f) not in 'ai':
869 if self.dirstate.state(f) not in 'ai':
870 self.ui.warn(_("%s not added!\n") % f)
870 self.ui.warn(_("%s not added!\n") % f)
871 else:
871 else:
872 self.dirstate.forget([f])
872 self.dirstate.forget([f])
873
873
874 def remove(self, list, unlink=False, wlock=None):
874 def remove(self, list, unlink=False, wlock=None):
875 if unlink:
875 if unlink:
876 for f in list:
876 for f in list:
877 try:
877 try:
878 util.unlink(self.wjoin(f))
878 util.unlink(self.wjoin(f))
879 except OSError, inst:
879 except OSError, inst:
880 if inst.errno != errno.ENOENT:
880 if inst.errno != errno.ENOENT:
881 raise
881 raise
882 if not wlock:
882 if not wlock:
883 wlock = self.wlock()
883 wlock = self.wlock()
884 for f in list:
884 for f in list:
885 p = self.wjoin(f)
885 p = self.wjoin(f)
886 if os.path.exists(p):
886 if os.path.exists(p):
887 self.ui.warn(_("%s still exists!\n") % f)
887 self.ui.warn(_("%s still exists!\n") % f)
888 elif self.dirstate.state(f) == 'a':
888 elif self.dirstate.state(f) == 'a':
889 self.dirstate.forget([f])
889 self.dirstate.forget([f])
890 elif f not in self.dirstate:
890 elif f not in self.dirstate:
891 self.ui.warn(_("%s not tracked!\n") % f)
891 self.ui.warn(_("%s not tracked!\n") % f)
892 else:
892 else:
893 self.dirstate.update([f], "r")
893 self.dirstate.update([f], "r")
894
894
895 def undelete(self, list, wlock=None):
895 def undelete(self, list, wlock=None):
896 p = self.dirstate.parents()[0]
896 p = self.dirstate.parents()[0]
897 mn = self.changelog.read(p)[0]
897 mn = self.changelog.read(p)[0]
898 m = self.manifest.read(mn)
898 m = self.manifest.read(mn)
899 if not wlock:
899 if not wlock:
900 wlock = self.wlock()
900 wlock = self.wlock()
901 for f in list:
901 for f in list:
902 if self.dirstate.state(f) not in "r":
902 if self.dirstate.state(f) not in "r":
903 self.ui.warn("%s not removed!\n" % f)
903 self.ui.warn("%s not removed!\n" % f)
904 else:
904 else:
905 t = self.file(f).read(m[f])
905 t = self.file(f).read(m[f])
906 self.wwrite(f, t)
906 self.wwrite(f, t)
907 util.set_exec(self.wjoin(f), m.execf(f))
907 util.set_exec(self.wjoin(f), m.execf(f))
908 self.dirstate.update([f], "n")
908 self.dirstate.update([f], "n")
909
909
910 def copy(self, source, dest, wlock=None):
910 def copy(self, source, dest, wlock=None):
911 p = self.wjoin(dest)
911 p = self.wjoin(dest)
912 if not os.path.exists(p):
912 if not os.path.exists(p):
913 self.ui.warn(_("%s does not exist!\n") % dest)
913 self.ui.warn(_("%s does not exist!\n") % dest)
914 elif not os.path.isfile(p):
914 elif not os.path.isfile(p):
915 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
915 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
916 else:
916 else:
917 if not wlock:
917 if not wlock:
918 wlock = self.wlock()
918 wlock = self.wlock()
919 if self.dirstate.state(dest) == '?':
919 if self.dirstate.state(dest) == '?':
920 self.dirstate.update([dest], "a")
920 self.dirstate.update([dest], "a")
921 self.dirstate.copy(source, dest)
921 self.dirstate.copy(source, dest)
922
922
923 def heads(self, start=None):
923 def heads(self, start=None):
924 heads = self.changelog.heads(start)
924 heads = self.changelog.heads(start)
925 # sort the output in rev descending order
925 # sort the output in rev descending order
926 heads = [(-self.changelog.rev(h), h) for h in heads]
926 heads = [(-self.changelog.rev(h), h) for h in heads]
927 heads.sort()
927 heads.sort()
928 return [n for (r, n) in heads]
928 return [n for (r, n) in heads]
929
929
930 # branchlookup returns a dict giving a list of branches for
930 # branchlookup returns a dict giving a list of branches for
931 # each head. A branch is defined as the tag of a node or
931 # each head. A branch is defined as the tag of a node or
932 # the branch of the node's parents. If a node has multiple
932 # the branch of the node's parents. If a node has multiple
933 # branch tags, tags are eliminated if they are visible from other
933 # branch tags, tags are eliminated if they are visible from other
934 # branch tags.
934 # branch tags.
935 #
935 #
936 # So, for this graph: a->b->c->d->e
936 # So, for this graph: a->b->c->d->e
937 # \ /
937 # \ /
938 # aa -----/
938 # aa -----/
939 # a has tag 2.6.12
939 # a has tag 2.6.12
940 # d has tag 2.6.13
940 # d has tag 2.6.13
941 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
941 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
942 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
942 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
943 # from the list.
943 # from the list.
944 #
944 #
945 # It is possible that more than one head will have the same branch tag.
945 # It is possible that more than one head will have the same branch tag.
946 # callers need to check the result for multiple heads under the same
946 # callers need to check the result for multiple heads under the same
947 # branch tag if that is a problem for them (ie checkout of a specific
947 # branch tag if that is a problem for them (ie checkout of a specific
948 # branch).
948 # branch).
949 #
949 #
950 # passing in a specific branch will limit the depth of the search
950 # passing in a specific branch will limit the depth of the search
951 # through the parents. It won't limit the branches returned in the
951 # through the parents. It won't limit the branches returned in the
952 # result though.
952 # result though.
953 def branchlookup(self, heads=None, branch=None):
953 def branchlookup(self, heads=None, branch=None):
954 if not heads:
954 if not heads:
955 heads = self.heads()
955 heads = self.heads()
956 headt = [ h for h in heads ]
956 headt = [ h for h in heads ]
957 chlog = self.changelog
957 chlog = self.changelog
958 branches = {}
958 branches = {}
959 merges = []
959 merges = []
960 seenmerge = {}
960 seenmerge = {}
961
961
962 # traverse the tree once for each head, recording in the branches
962 # traverse the tree once for each head, recording in the branches
963 # dict which tags are visible from this head. The branches
963 # dict which tags are visible from this head. The branches
964 # dict also records which tags are visible from each tag
964 # dict also records which tags are visible from each tag
965 # while we traverse.
965 # while we traverse.
966 while headt or merges:
966 while headt or merges:
967 if merges:
967 if merges:
968 n, found = merges.pop()
968 n, found = merges.pop()
969 visit = [n]
969 visit = [n]
970 else:
970 else:
971 h = headt.pop()
971 h = headt.pop()
972 visit = [h]
972 visit = [h]
973 found = [h]
973 found = [h]
974 seen = {}
974 seen = {}
975 while visit:
975 while visit:
976 n = visit.pop()
976 n = visit.pop()
977 if n in seen:
977 if n in seen:
978 continue
978 continue
979 pp = chlog.parents(n)
979 pp = chlog.parents(n)
980 tags = self.nodetags(n)
980 tags = self.nodetags(n)
981 if tags:
981 if tags:
982 for x in tags:
982 for x in tags:
983 if x == 'tip':
983 if x == 'tip':
984 continue
984 continue
985 for f in found:
985 for f in found:
986 branches.setdefault(f, {})[n] = 1
986 branches.setdefault(f, {})[n] = 1
987 branches.setdefault(n, {})[n] = 1
987 branches.setdefault(n, {})[n] = 1
988 break
988 break
989 if n not in found:
989 if n not in found:
990 found.append(n)
990 found.append(n)
991 if branch in tags:
991 if branch in tags:
992 continue
992 continue
993 seen[n] = 1
993 seen[n] = 1
994 if pp[1] != nullid and n not in seenmerge:
994 if pp[1] != nullid and n not in seenmerge:
995 merges.append((pp[1], [x for x in found]))
995 merges.append((pp[1], [x for x in found]))
996 seenmerge[n] = 1
996 seenmerge[n] = 1
997 if pp[0] != nullid:
997 if pp[0] != nullid:
998 visit.append(pp[0])
998 visit.append(pp[0])
999 # traverse the branches dict, eliminating branch tags from each
999 # traverse the branches dict, eliminating branch tags from each
1000 # head that are visible from another branch tag for that head.
1000 # head that are visible from another branch tag for that head.
1001 out = {}
1001 out = {}
1002 viscache = {}
1002 viscache = {}
1003 for h in heads:
1003 for h in heads:
1004 def visible(node):
1004 def visible(node):
1005 if node in viscache:
1005 if node in viscache:
1006 return viscache[node]
1006 return viscache[node]
1007 ret = {}
1007 ret = {}
1008 visit = [node]
1008 visit = [node]
1009 while visit:
1009 while visit:
1010 x = visit.pop()
1010 x = visit.pop()
1011 if x in viscache:
1011 if x in viscache:
1012 ret.update(viscache[x])
1012 ret.update(viscache[x])
1013 elif x not in ret:
1013 elif x not in ret:
1014 ret[x] = 1
1014 ret[x] = 1
1015 if x in branches:
1015 if x in branches:
1016 visit[len(visit):] = branches[x].keys()
1016 visit[len(visit):] = branches[x].keys()
1017 viscache[node] = ret
1017 viscache[node] = ret
1018 return ret
1018 return ret
1019 if h not in branches:
1019 if h not in branches:
1020 continue
1020 continue
1021 # O(n^2), but somewhat limited. This only searches the
1021 # O(n^2), but somewhat limited. This only searches the
1022 # tags visible from a specific head, not all the tags in the
1022 # tags visible from a specific head, not all the tags in the
1023 # whole repo.
1023 # whole repo.
1024 for b in branches[h]:
1024 for b in branches[h]:
1025 vis = False
1025 vis = False
1026 for bb in branches[h].keys():
1026 for bb in branches[h].keys():
1027 if b != bb:
1027 if b != bb:
1028 if b in visible(bb):
1028 if b in visible(bb):
1029 vis = True
1029 vis = True
1030 break
1030 break
1031 if not vis:
1031 if not vis:
1032 l = out.setdefault(h, [])
1032 l = out.setdefault(h, [])
1033 l[len(l):] = self.nodetags(b)
1033 l[len(l):] = self.nodetags(b)
1034 return out
1034 return out
1035
1035
1036 def branches(self, nodes):
1036 def branches(self, nodes):
1037 if not nodes:
1037 if not nodes:
1038 nodes = [self.changelog.tip()]
1038 nodes = [self.changelog.tip()]
1039 b = []
1039 b = []
1040 for n in nodes:
1040 for n in nodes:
1041 t = n
1041 t = n
1042 while 1:
1042 while 1:
1043 p = self.changelog.parents(n)
1043 p = self.changelog.parents(n)
1044 if p[1] != nullid or p[0] == nullid:
1044 if p[1] != nullid or p[0] == nullid:
1045 b.append((t, n, p[0], p[1]))
1045 b.append((t, n, p[0], p[1]))
1046 break
1046 break
1047 n = p[0]
1047 n = p[0]
1048 return b
1048 return b
1049
1049
1050 def between(self, pairs):
1050 def between(self, pairs):
1051 r = []
1051 r = []
1052
1052
1053 for top, bottom in pairs:
1053 for top, bottom in pairs:
1054 n, l, i = top, [], 0
1054 n, l, i = top, [], 0
1055 f = 1
1055 f = 1
1056
1056
1057 while n != bottom:
1057 while n != bottom:
1058 p = self.changelog.parents(n)[0]
1058 p = self.changelog.parents(n)[0]
1059 if i == f:
1059 if i == f:
1060 l.append(n)
1060 l.append(n)
1061 f = f * 2
1061 f = f * 2
1062 n = p
1062 n = p
1063 i += 1
1063 i += 1
1064
1064
1065 r.append(l)
1065 r.append(l)
1066
1066
1067 return r
1067 return r
1068
1068
1069 def findincoming(self, remote, base=None, heads=None, force=False):
1069 def findincoming(self, remote, base=None, heads=None, force=False):
1070 """Return list of roots of the subsets of missing nodes from remote
1070 """Return list of roots of the subsets of missing nodes from remote
1071
1071
1072 If base dict is specified, assume that these nodes and their parents
1072 If base dict is specified, assume that these nodes and their parents
1073 exist on the remote side and that no child of a node of base exists
1073 exist on the remote side and that no child of a node of base exists
1074 in both remote and self.
1074 in both remote and self.
1075 Furthermore base will be updated to include the nodes that exists
1075 Furthermore base will be updated to include the nodes that exists
1076 in self and remote but no children exists in self and remote.
1076 in self and remote but no children exists in self and remote.
1077 If a list of heads is specified, return only nodes which are heads
1077 If a list of heads is specified, return only nodes which are heads
1078 or ancestors of these heads.
1078 or ancestors of these heads.
1079
1079
1080 All the ancestors of base are in self and in remote.
1080 All the ancestors of base are in self and in remote.
1081 All the descendants of the list returned are missing in self.
1081 All the descendants of the list returned are missing in self.
1082 (and so we know that the rest of the nodes are missing in remote, see
1082 (and so we know that the rest of the nodes are missing in remote, see
1083 outgoing)
1083 outgoing)
1084 """
1084 """
1085 m = self.changelog.nodemap
1085 m = self.changelog.nodemap
1086 search = []
1086 search = []
1087 fetch = {}
1087 fetch = {}
1088 seen = {}
1088 seen = {}
1089 seenbranch = {}
1089 seenbranch = {}
1090 if base == None:
1090 if base == None:
1091 base = {}
1091 base = {}
1092
1092
1093 if not heads:
1093 if not heads:
1094 heads = remote.heads()
1094 heads = remote.heads()
1095
1095
1096 if self.changelog.tip() == nullid:
1096 if self.changelog.tip() == nullid:
1097 base[nullid] = 1
1097 base[nullid] = 1
1098 if heads != [nullid]:
1098 if heads != [nullid]:
1099 return [nullid]
1099 return [nullid]
1100 return []
1100 return []
1101
1101
1102 # assume we're closer to the tip than the root
1102 # assume we're closer to the tip than the root
1103 # and start by examining the heads
1103 # and start by examining the heads
1104 self.ui.status(_("searching for changes\n"))
1104 self.ui.status(_("searching for changes\n"))
1105
1105
1106 unknown = []
1106 unknown = []
1107 for h in heads:
1107 for h in heads:
1108 if h not in m:
1108 if h not in m:
1109 unknown.append(h)
1109 unknown.append(h)
1110 else:
1110 else:
1111 base[h] = 1
1111 base[h] = 1
1112
1112
1113 if not unknown:
1113 if not unknown:
1114 return []
1114 return []
1115
1115
1116 req = dict.fromkeys(unknown)
1116 req = dict.fromkeys(unknown)
1117 reqcnt = 0
1117 reqcnt = 0
1118
1118
1119 # search through remote branches
1119 # search through remote branches
1120 # a 'branch' here is a linear segment of history, with four parts:
1120 # a 'branch' here is a linear segment of history, with four parts:
1121 # head, root, first parent, second parent
1121 # head, root, first parent, second parent
1122 # (a branch always has two parents (or none) by definition)
1122 # (a branch always has two parents (or none) by definition)
1123 unknown = remote.branches(unknown)
1123 unknown = remote.branches(unknown)
1124 while unknown:
1124 while unknown:
1125 r = []
1125 r = []
1126 while unknown:
1126 while unknown:
1127 n = unknown.pop(0)
1127 n = unknown.pop(0)
1128 if n[0] in seen:
1128 if n[0] in seen:
1129 continue
1129 continue
1130
1130
1131 self.ui.debug(_("examining %s:%s\n")
1131 self.ui.debug(_("examining %s:%s\n")
1132 % (short(n[0]), short(n[1])))
1132 % (short(n[0]), short(n[1])))
1133 if n[0] == nullid: # found the end of the branch
1133 if n[0] == nullid: # found the end of the branch
1134 pass
1134 pass
1135 elif n in seenbranch:
1135 elif n in seenbranch:
1136 self.ui.debug(_("branch already found\n"))
1136 self.ui.debug(_("branch already found\n"))
1137 continue
1137 continue
1138 elif n[1] and n[1] in m: # do we know the base?
1138 elif n[1] and n[1] in m: # do we know the base?
1139 self.ui.debug(_("found incomplete branch %s:%s\n")
1139 self.ui.debug(_("found incomplete branch %s:%s\n")
1140 % (short(n[0]), short(n[1])))
1140 % (short(n[0]), short(n[1])))
1141 search.append(n) # schedule branch range for scanning
1141 search.append(n) # schedule branch range for scanning
1142 seenbranch[n] = 1
1142 seenbranch[n] = 1
1143 else:
1143 else:
1144 if n[1] not in seen and n[1] not in fetch:
1144 if n[1] not in seen and n[1] not in fetch:
1145 if n[2] in m and n[3] in m:
1145 if n[2] in m and n[3] in m:
1146 self.ui.debug(_("found new changeset %s\n") %
1146 self.ui.debug(_("found new changeset %s\n") %
1147 short(n[1]))
1147 short(n[1]))
1148 fetch[n[1]] = 1 # earliest unknown
1148 fetch[n[1]] = 1 # earliest unknown
1149 for p in n[2:4]:
1149 for p in n[2:4]:
1150 if p in m:
1150 if p in m:
1151 base[p] = 1 # latest known
1151 base[p] = 1 # latest known
1152
1152
1153 for p in n[2:4]:
1153 for p in n[2:4]:
1154 if p not in req and p not in m:
1154 if p not in req and p not in m:
1155 r.append(p)
1155 r.append(p)
1156 req[p] = 1
1156 req[p] = 1
1157 seen[n[0]] = 1
1157 seen[n[0]] = 1
1158
1158
1159 if r:
1159 if r:
1160 reqcnt += 1
1160 reqcnt += 1
1161 self.ui.debug(_("request %d: %s\n") %
1161 self.ui.debug(_("request %d: %s\n") %
1162 (reqcnt, " ".join(map(short, r))))
1162 (reqcnt, " ".join(map(short, r))))
1163 for p in xrange(0, len(r), 10):
1163 for p in xrange(0, len(r), 10):
1164 for b in remote.branches(r[p:p+10]):
1164 for b in remote.branches(r[p:p+10]):
1165 self.ui.debug(_("received %s:%s\n") %
1165 self.ui.debug(_("received %s:%s\n") %
1166 (short(b[0]), short(b[1])))
1166 (short(b[0]), short(b[1])))
1167 unknown.append(b)
1167 unknown.append(b)
1168
1168
1169 # do binary search on the branches we found
1169 # do binary search on the branches we found
1170 while search:
1170 while search:
1171 n = search.pop(0)
1171 n = search.pop(0)
1172 reqcnt += 1
1172 reqcnt += 1
1173 l = remote.between([(n[0], n[1])])[0]
1173 l = remote.between([(n[0], n[1])])[0]
1174 l.append(n[1])
1174 l.append(n[1])
1175 p = n[0]
1175 p = n[0]
1176 f = 1
1176 f = 1
1177 for i in l:
1177 for i in l:
1178 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1178 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1179 if i in m:
1179 if i in m:
1180 if f <= 2:
1180 if f <= 2:
1181 self.ui.debug(_("found new branch changeset %s\n") %
1181 self.ui.debug(_("found new branch changeset %s\n") %
1182 short(p))
1182 short(p))
1183 fetch[p] = 1
1183 fetch[p] = 1
1184 base[i] = 1
1184 base[i] = 1
1185 else:
1185 else:
1186 self.ui.debug(_("narrowed branch search to %s:%s\n")
1186 self.ui.debug(_("narrowed branch search to %s:%s\n")
1187 % (short(p), short(i)))
1187 % (short(p), short(i)))
1188 search.append((p, i))
1188 search.append((p, i))
1189 break
1189 break
1190 p, f = i, f * 2
1190 p, f = i, f * 2
1191
1191
1192 # sanity check our fetch list
1192 # sanity check our fetch list
1193 for f in fetch.keys():
1193 for f in fetch.keys():
1194 if f in m:
1194 if f in m:
1195 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1195 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1196
1196
1197 if base.keys() == [nullid]:
1197 if base.keys() == [nullid]:
1198 if force:
1198 if force:
1199 self.ui.warn(_("warning: repository is unrelated\n"))
1199 self.ui.warn(_("warning: repository is unrelated\n"))
1200 else:
1200 else:
1201 raise util.Abort(_("repository is unrelated"))
1201 raise util.Abort(_("repository is unrelated"))
1202
1202
1203 self.ui.debug(_("found new changesets starting at ") +
1203 self.ui.debug(_("found new changesets starting at ") +
1204 " ".join([short(f) for f in fetch]) + "\n")
1204 " ".join([short(f) for f in fetch]) + "\n")
1205
1205
1206 self.ui.debug(_("%d total queries\n") % reqcnt)
1206 self.ui.debug(_("%d total queries\n") % reqcnt)
1207
1207
1208 return fetch.keys()
1208 return fetch.keys()
1209
1209
1210 def findoutgoing(self, remote, base=None, heads=None, force=False):
1210 def findoutgoing(self, remote, base=None, heads=None, force=False):
1211 """Return list of nodes that are roots of subsets not in remote
1211 """Return list of nodes that are roots of subsets not in remote
1212
1212
1213 If base dict is specified, assume that these nodes and their parents
1213 If base dict is specified, assume that these nodes and their parents
1214 exist on the remote side.
1214 exist on the remote side.
1215 If a list of heads is specified, return only nodes which are heads
1215 If a list of heads is specified, return only nodes which are heads
1216 or ancestors of these heads, and return a second element which
1216 or ancestors of these heads, and return a second element which
1217 contains all remote heads which get new children.
1217 contains all remote heads which get new children.
1218 """
1218 """
1219 if base == None:
1219 if base == None:
1220 base = {}
1220 base = {}
1221 self.findincoming(remote, base, heads, force=force)
1221 self.findincoming(remote, base, heads, force=force)
1222
1222
1223 self.ui.debug(_("common changesets up to ")
1223 self.ui.debug(_("common changesets up to ")
1224 + " ".join(map(short, base.keys())) + "\n")
1224 + " ".join(map(short, base.keys())) + "\n")
1225
1225
1226 remain = dict.fromkeys(self.changelog.nodemap)
1226 remain = dict.fromkeys(self.changelog.nodemap)
1227
1227
1228 # prune everything remote has from the tree
1228 # prune everything remote has from the tree
1229 del remain[nullid]
1229 del remain[nullid]
1230 remove = base.keys()
1230 remove = base.keys()
1231 while remove:
1231 while remove:
1232 n = remove.pop(0)
1232 n = remove.pop(0)
1233 if n in remain:
1233 if n in remain:
1234 del remain[n]
1234 del remain[n]
1235 for p in self.changelog.parents(n):
1235 for p in self.changelog.parents(n):
1236 remove.append(p)
1236 remove.append(p)
1237
1237
1238 # find every node whose parents have been pruned
1238 # find every node whose parents have been pruned
1239 subset = []
1239 subset = []
1240 # find every remote head that will get new children
1240 # find every remote head that will get new children
1241 updated_heads = {}
1241 updated_heads = {}
1242 for n in remain:
1242 for n in remain:
1243 p1, p2 = self.changelog.parents(n)
1243 p1, p2 = self.changelog.parents(n)
1244 if p1 not in remain and p2 not in remain:
1244 if p1 not in remain and p2 not in remain:
1245 subset.append(n)
1245 subset.append(n)
1246 if heads:
1246 if heads:
1247 if p1 in heads:
1247 if p1 in heads:
1248 updated_heads[p1] = True
1248 updated_heads[p1] = True
1249 if p2 in heads:
1249 if p2 in heads:
1250 updated_heads[p2] = True
1250 updated_heads[p2] = True
1251
1251
1252 # this is the set of all roots we have to push
1252 # this is the set of all roots we have to push
1253 if heads:
1253 if heads:
1254 return subset, updated_heads.keys()
1254 return subset, updated_heads.keys()
1255 else:
1255 else:
1256 return subset
1256 return subset
1257
1257
1258 def pull(self, remote, heads=None, force=False, lock=None):
1258 def pull(self, remote, heads=None, force=False, lock=None):
1259 mylock = False
1259 mylock = False
1260 if not lock:
1260 if not lock:
1261 lock = self.lock()
1261 lock = self.lock()
1262 mylock = True
1262 mylock = True
1263
1263
1264 try:
1264 try:
1265 fetch = self.findincoming(remote, force=force)
1265 fetch = self.findincoming(remote, force=force)
1266 if fetch == [nullid]:
1266 if fetch == [nullid]:
1267 self.ui.status(_("requesting all changes\n"))
1267 self.ui.status(_("requesting all changes\n"))
1268
1268
1269 if not fetch:
1269 if not fetch:
1270 self.ui.status(_("no changes found\n"))
1270 self.ui.status(_("no changes found\n"))
1271 return 0
1271 return 0
1272
1272
1273 if heads is None:
1273 if heads is None:
1274 cg = remote.changegroup(fetch, 'pull')
1274 cg = remote.changegroup(fetch, 'pull')
1275 else:
1275 else:
1276 if 'changegroupsubset' not in remote.capabilities:
1276 if 'changegroupsubset' not in remote.capabilities:
1277 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1277 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1278 cg = remote.changegroupsubset(fetch, heads, 'pull')
1278 cg = remote.changegroupsubset(fetch, heads, 'pull')
1279 return self.addchangegroup(cg, 'pull', remote.url())
1279 return self.addchangegroup(cg, 'pull', remote.url())
1280 finally:
1280 finally:
1281 if mylock:
1281 if mylock:
1282 lock.release()
1282 lock.release()
1283
1283
1284 def push(self, remote, force=False, revs=None):
1284 def push(self, remote, force=False, revs=None):
1285 # there are two ways to push to remote repo:
1285 # there are two ways to push to remote repo:
1286 #
1286 #
1287 # addchangegroup assumes local user can lock remote
1287 # addchangegroup assumes local user can lock remote
1288 # repo (local filesystem, old ssh servers).
1288 # repo (local filesystem, old ssh servers).
1289 #
1289 #
1290 # unbundle assumes local user cannot lock remote repo (new ssh
1290 # unbundle assumes local user cannot lock remote repo (new ssh
1291 # servers, http servers).
1291 # servers, http servers).
1292
1292
1293 if remote.capable('unbundle'):
1293 if remote.capable('unbundle'):
1294 return self.push_unbundle(remote, force, revs)
1294 return self.push_unbundle(remote, force, revs)
1295 return self.push_addchangegroup(remote, force, revs)
1295 return self.push_addchangegroup(remote, force, revs)
1296
1296
    def prepush(self, remote, force, revs):
        """Analyze what a push to remote would do and build the changegroup.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, status) when there is nothing to push or the
        push would create new remote heads without force.
        """
        # base is filled in by findincoming with the common nodes
        base = {}
        remote_heads = remote.heads()
        # inc is truthy if the remote has changes we lack (unsynced)
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # limit the outgoing set to ancestors of the requested revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repo is empty: any heads we push are fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # simulate the remote head set after the push and warn
                # if it would grow
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # known remote head with no outgoing descendant:
                            # it stays a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally: assume it remains a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1352
1352
1353 def push_addchangegroup(self, remote, force, revs):
1353 def push_addchangegroup(self, remote, force, revs):
1354 lock = remote.lock()
1354 lock = remote.lock()
1355
1355
1356 ret = self.prepush(remote, force, revs)
1356 ret = self.prepush(remote, force, revs)
1357 if ret[0] is not None:
1357 if ret[0] is not None:
1358 cg, remote_heads = ret
1358 cg, remote_heads = ret
1359 return remote.addchangegroup(cg, 'push', self.url())
1359 return remote.addchangegroup(cg, 'push', self.url())
1360 return ret[1]
1360 return ret[1]
1361
1361
1362 def push_unbundle(self, remote, force, revs):
1362 def push_unbundle(self, remote, force, revs):
1363 # local repo finds heads on server, finds out what revs it
1363 # local repo finds heads on server, finds out what revs it
1364 # must push. once revs transferred, if server finds it has
1364 # must push. once revs transferred, if server finds it has
1365 # different heads (someone else won commit/push race), server
1365 # different heads (someone else won commit/push race), server
1366 # aborts.
1366 # aborts.
1367
1367
1368 ret = self.prepush(remote, force, revs)
1368 ret = self.prepush(remote, force, revs)
1369 if ret[0] is not None:
1369 if ret[0] is not None:
1370 cg, remote_heads = ret
1370 cg, remote_heads = ret
1371 if force: remote_heads = ['force']
1371 if force: remote_heads = ['force']
1372 return remote.unbundle(cg, remote_heads, 'push')
1372 return remote.unbundle(cg, remote_heads, 'push')
1373 return ret[1]
1373 return ret[1]
1374
1374
1375 def changegroupinfo(self, nodes):
1375 def changegroupinfo(self, nodes):
1376 self.ui.note(_("%d changesets found\n") % len(nodes))
1376 self.ui.note(_("%d changesets found\n") % len(nodes))
1377 if self.ui.debugflag:
1377 if self.ui.debugflag:
1378 self.ui.debug(_("List of changesets:\n"))
1378 self.ui.debug(_("List of changesets:\n"))
1379 for node in nodes:
1379 for node in nodes:
1380 self.ui.debug("%s\n" % hex(node))
1380 self.ui.debug("%s\n" % hex(node))
1381
1381
1382 def changegroupsubset(self, bases, heads, source):
1382 def changegroupsubset(self, bases, heads, source):
1383 """This function generates a changegroup consisting of all the nodes
1383 """This function generates a changegroup consisting of all the nodes
1384 that are descendents of any of the bases, and ancestors of any of
1384 that are descendents of any of the bases, and ancestors of any of
1385 the heads.
1385 the heads.
1386
1386
1387 It is fairly complex as determining which filenodes and which
1387 It is fairly complex as determining which filenodes and which
1388 manifest nodes need to be included for the changeset to be complete
1388 manifest nodes need to be included for the changeset to be complete
1389 is non-trivial.
1389 is non-trivial.
1390
1390
1391 Another wrinkle is doing the reverse, figuring out which changeset in
1391 Another wrinkle is doing the reverse, figuring out which changeset in
1392 the changegroup a particular filenode or manifestnode belongs to."""
1392 the changegroup a particular filenode or manifestnode belongs to."""
1393
1393
1394 self.hook('preoutgoing', throw=True, source=source)
1394 self.hook('preoutgoing', throw=True, source=source)
1395
1395
1396 # Set up some initial variables
1396 # Set up some initial variables
1397 # Make it easy to refer to self.changelog
1397 # Make it easy to refer to self.changelog
1398 cl = self.changelog
1398 cl = self.changelog
1399 # msng is short for missing - compute the list of changesets in this
1399 # msng is short for missing - compute the list of changesets in this
1400 # changegroup.
1400 # changegroup.
1401 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1401 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1402 self.changegroupinfo(msng_cl_lst)
1402 self.changegroupinfo(msng_cl_lst)
1403 # Some bases may turn out to be superfluous, and some heads may be
1403 # Some bases may turn out to be superfluous, and some heads may be
1404 # too. nodesbetween will return the minimal set of bases and heads
1404 # too. nodesbetween will return the minimal set of bases and heads
1405 # necessary to re-create the changegroup.
1405 # necessary to re-create the changegroup.
1406
1406
1407 # Known heads are the list of heads that it is assumed the recipient
1407 # Known heads are the list of heads that it is assumed the recipient
1408 # of this changegroup will know about.
1408 # of this changegroup will know about.
1409 knownheads = {}
1409 knownheads = {}
1410 # We assume that all parents of bases are known heads.
1410 # We assume that all parents of bases are known heads.
1411 for n in bases:
1411 for n in bases:
1412 for p in cl.parents(n):
1412 for p in cl.parents(n):
1413 if p != nullid:
1413 if p != nullid:
1414 knownheads[p] = 1
1414 knownheads[p] = 1
1415 knownheads = knownheads.keys()
1415 knownheads = knownheads.keys()
1416 if knownheads:
1416 if knownheads:
1417 # Now that we know what heads are known, we can compute which
1417 # Now that we know what heads are known, we can compute which
1418 # changesets are known. The recipient must know about all
1418 # changesets are known. The recipient must know about all
1419 # changesets required to reach the known heads from the null
1419 # changesets required to reach the known heads from the null
1420 # changeset.
1420 # changeset.
1421 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1421 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1422 junk = None
1422 junk = None
1423 # Transform the list into an ersatz set.
1423 # Transform the list into an ersatz set.
1424 has_cl_set = dict.fromkeys(has_cl_set)
1424 has_cl_set = dict.fromkeys(has_cl_set)
1425 else:
1425 else:
1426 # If there were no known heads, the recipient cannot be assumed to
1426 # If there were no known heads, the recipient cannot be assumed to
1427 # know about any changesets.
1427 # know about any changesets.
1428 has_cl_set = {}
1428 has_cl_set = {}
1429
1429
1430 # Make it easy to refer to self.manifest
1430 # Make it easy to refer to self.manifest
1431 mnfst = self.manifest
1431 mnfst = self.manifest
1432 # We don't know which manifests are missing yet
1432 # We don't know which manifests are missing yet
1433 msng_mnfst_set = {}
1433 msng_mnfst_set = {}
1434 # Nor do we know which filenodes are missing.
1434 # Nor do we know which filenodes are missing.
1435 msng_filenode_set = {}
1435 msng_filenode_set = {}
1436
1436
1437 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1437 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1438 junk = None
1438 junk = None
1439
1439
1440 # A changeset always belongs to itself, so the changenode lookup
1440 # A changeset always belongs to itself, so the changenode lookup
1441 # function for a changenode is identity.
1441 # function for a changenode is identity.
1442 def identity(x):
1442 def identity(x):
1443 return x
1443 return x
1444
1444
1445 # A function generating function. Sets up an environment for the
1445 # A function generating function. Sets up an environment for the
1446 # inner function.
1446 # inner function.
1447 def cmp_by_rev_func(revlog):
1447 def cmp_by_rev_func(revlog):
1448 # Compare two nodes by their revision number in the environment's
1448 # Compare two nodes by their revision number in the environment's
1449 # revision history. Since the revision number both represents the
1449 # revision history. Since the revision number both represents the
1450 # most efficient order to read the nodes in, and represents a
1450 # most efficient order to read the nodes in, and represents a
1451 # topological sorting of the nodes, this function is often useful.
1451 # topological sorting of the nodes, this function is often useful.
1452 def cmp_by_rev(a, b):
1452 def cmp_by_rev(a, b):
1453 return cmp(revlog.rev(a), revlog.rev(b))
1453 return cmp(revlog.rev(a), revlog.rev(b))
1454 return cmp_by_rev
1454 return cmp_by_rev
1455
1455
1456 # If we determine that a particular file or manifest node must be a
1456 # If we determine that a particular file or manifest node must be a
1457 # node that the recipient of the changegroup will already have, we can
1457 # node that the recipient of the changegroup will already have, we can
1458 # also assume the recipient will have all the parents. This function
1458 # also assume the recipient will have all the parents. This function
1459 # prunes them from the set of missing nodes.
1459 # prunes them from the set of missing nodes.
1460 def prune_parents(revlog, hasset, msngset):
1460 def prune_parents(revlog, hasset, msngset):
1461 haslst = hasset.keys()
1461 haslst = hasset.keys()
1462 haslst.sort(cmp_by_rev_func(revlog))
1462 haslst.sort(cmp_by_rev_func(revlog))
1463 for node in haslst:
1463 for node in haslst:
1464 parentlst = [p for p in revlog.parents(node) if p != nullid]
1464 parentlst = [p for p in revlog.parents(node) if p != nullid]
1465 while parentlst:
1465 while parentlst:
1466 n = parentlst.pop()
1466 n = parentlst.pop()
1467 if n not in hasset:
1467 if n not in hasset:
1468 hasset[n] = 1
1468 hasset[n] = 1
1469 p = [p for p in revlog.parents(n) if p != nullid]
1469 p = [p for p in revlog.parents(n) if p != nullid]
1470 parentlst.extend(p)
1470 parentlst.extend(p)
1471 for n in hasset:
1471 for n in hasset:
1472 msngset.pop(n, None)
1472 msngset.pop(n, None)
1473
1473
1474 # This is a function generating function used to set up an environment
1474 # This is a function generating function used to set up an environment
1475 # for the inner function to execute in.
1475 # for the inner function to execute in.
1476 def manifest_and_file_collector(changedfileset):
1476 def manifest_and_file_collector(changedfileset):
1477 # This is an information gathering function that gathers
1477 # This is an information gathering function that gathers
1478 # information from each changeset node that goes out as part of
1478 # information from each changeset node that goes out as part of
1479 # the changegroup. The information gathered is a list of which
1479 # the changegroup. The information gathered is a list of which
1480 # manifest nodes are potentially required (the recipient may
1480 # manifest nodes are potentially required (the recipient may
1481 # already have them) and total list of all files which were
1481 # already have them) and total list of all files which were
1482 # changed in any changeset in the changegroup.
1482 # changed in any changeset in the changegroup.
1483 #
1483 #
1484 # We also remember the first changenode we saw any manifest
1484 # We also remember the first changenode we saw any manifest
1485 # referenced by so we can later determine which changenode 'owns'
1485 # referenced by so we can later determine which changenode 'owns'
1486 # the manifest.
1486 # the manifest.
1487 def collect_manifests_and_files(clnode):
1487 def collect_manifests_and_files(clnode):
1488 c = cl.read(clnode)
1488 c = cl.read(clnode)
1489 for f in c[3]:
1489 for f in c[3]:
1490 # This is to make sure we only have one instance of each
1490 # This is to make sure we only have one instance of each
1491 # filename string for each filename.
1491 # filename string for each filename.
1492 changedfileset.setdefault(f, f)
1492 changedfileset.setdefault(f, f)
1493 msng_mnfst_set.setdefault(c[0], clnode)
1493 msng_mnfst_set.setdefault(c[0], clnode)
1494 return collect_manifests_and_files
1494 return collect_manifests_and_files
1495
1495
1496 # Figure out which manifest nodes (of the ones we think might be part
1496 # Figure out which manifest nodes (of the ones we think might be part
1497 # of the changegroup) the recipient must know about and remove them
1497 # of the changegroup) the recipient must know about and remove them
1498 # from the changegroup.
1498 # from the changegroup.
1499 def prune_manifests():
1499 def prune_manifests():
1500 has_mnfst_set = {}
1500 has_mnfst_set = {}
1501 for n in msng_mnfst_set:
1501 for n in msng_mnfst_set:
1502 # If a 'missing' manifest thinks it belongs to a changenode
1502 # If a 'missing' manifest thinks it belongs to a changenode
1503 # the recipient is assumed to have, obviously the recipient
1503 # the recipient is assumed to have, obviously the recipient
1504 # must have that manifest.
1504 # must have that manifest.
1505 linknode = cl.node(mnfst.linkrev(n))
1505 linknode = cl.node(mnfst.linkrev(n))
1506 if linknode in has_cl_set:
1506 if linknode in has_cl_set:
1507 has_mnfst_set[n] = 1
1507 has_mnfst_set[n] = 1
1508 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1508 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1509
1509
1510 # Use the information collected in collect_manifests_and_files to say
1510 # Use the information collected in collect_manifests_and_files to say
1511 # which changenode any manifestnode belongs to.
1511 # which changenode any manifestnode belongs to.
1512 def lookup_manifest_link(mnfstnode):
1512 def lookup_manifest_link(mnfstnode):
1513 return msng_mnfst_set[mnfstnode]
1513 return msng_mnfst_set[mnfstnode]
1514
1514
1515 # A function generating function that sets up the initial environment
1515 # A function generating function that sets up the initial environment
1516 # the inner function.
1516 # the inner function.
1517 def filenode_collector(changedfiles):
1517 def filenode_collector(changedfiles):
1518 next_rev = [0]
1518 next_rev = [0]
1519 # This gathers information from each manifestnode included in the
1519 # This gathers information from each manifestnode included in the
1520 # changegroup about which filenodes the manifest node references
1520 # changegroup about which filenodes the manifest node references
1521 # so we can include those in the changegroup too.
1521 # so we can include those in the changegroup too.
1522 #
1522 #
1523 # It also remembers which changenode each filenode belongs to. It
1523 # It also remembers which changenode each filenode belongs to. It
1524 # does this by assuming the a filenode belongs to the changenode
1524 # does this by assuming the a filenode belongs to the changenode
1525 # the first manifest that references it belongs to.
1525 # the first manifest that references it belongs to.
1526 def collect_msng_filenodes(mnfstnode):
1526 def collect_msng_filenodes(mnfstnode):
1527 r = mnfst.rev(mnfstnode)
1527 r = mnfst.rev(mnfstnode)
1528 if r == next_rev[0]:
1528 if r == next_rev[0]:
1529 # If the last rev we looked at was the one just previous,
1529 # If the last rev we looked at was the one just previous,
1530 # we only need to see a diff.
1530 # we only need to see a diff.
1531 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1531 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1532 # For each line in the delta
1532 # For each line in the delta
1533 for dline in delta.splitlines():
1533 for dline in delta.splitlines():
1534 # get the filename and filenode for that line
1534 # get the filename and filenode for that line
1535 f, fnode = dline.split('\0')
1535 f, fnode = dline.split('\0')
1536 fnode = bin(fnode[:40])
1536 fnode = bin(fnode[:40])
1537 f = changedfiles.get(f, None)
1537 f = changedfiles.get(f, None)
1538 # And if the file is in the list of files we care
1538 # And if the file is in the list of files we care
1539 # about.
1539 # about.
1540 if f is not None:
1540 if f is not None:
1541 # Get the changenode this manifest belongs to
1541 # Get the changenode this manifest belongs to
1542 clnode = msng_mnfst_set[mnfstnode]
1542 clnode = msng_mnfst_set[mnfstnode]
1543 # Create the set of filenodes for the file if
1543 # Create the set of filenodes for the file if
1544 # there isn't one already.
1544 # there isn't one already.
1545 ndset = msng_filenode_set.setdefault(f, {})
1545 ndset = msng_filenode_set.setdefault(f, {})
1546 # And set the filenode's changelog node to the
1546 # And set the filenode's changelog node to the
1547 # manifest's if it hasn't been set already.
1547 # manifest's if it hasn't been set already.
1548 ndset.setdefault(fnode, clnode)
1548 ndset.setdefault(fnode, clnode)
1549 else:
1549 else:
1550 # Otherwise we need a full manifest.
1550 # Otherwise we need a full manifest.
1551 m = mnfst.read(mnfstnode)
1551 m = mnfst.read(mnfstnode)
1552 # For every file in we care about.
1552 # For every file in we care about.
1553 for f in changedfiles:
1553 for f in changedfiles:
1554 fnode = m.get(f, None)
1554 fnode = m.get(f, None)
1555 # If it's in the manifest
1555 # If it's in the manifest
1556 if fnode is not None:
1556 if fnode is not None:
1557 # See comments above.
1557 # See comments above.
1558 clnode = msng_mnfst_set[mnfstnode]
1558 clnode = msng_mnfst_set[mnfstnode]
1559 ndset = msng_filenode_set.setdefault(f, {})
1559 ndset = msng_filenode_set.setdefault(f, {})
1560 ndset.setdefault(fnode, clnode)
1560 ndset.setdefault(fnode, clnode)
1561 # Remember the revision we hope to see next.
1561 # Remember the revision we hope to see next.
1562 next_rev[0] = r + 1
1562 next_rev[0] = r + 1
1563 return collect_msng_filenodes
1563 return collect_msng_filenodes
1564
1564
1565 # We have a list of filenodes we think we need for a file, lets remove
1565 # We have a list of filenodes we think we need for a file, lets remove
1566 # all those we now the recipient must have.
1566 # all those we now the recipient must have.
1567 def prune_filenodes(f, filerevlog):
1567 def prune_filenodes(f, filerevlog):
1568 msngset = msng_filenode_set[f]
1568 msngset = msng_filenode_set[f]
1569 hasset = {}
1569 hasset = {}
1570 # If a 'missing' filenode thinks it belongs to a changenode we
1570 # If a 'missing' filenode thinks it belongs to a changenode we
1571 # assume the recipient must have, then the recipient must have
1571 # assume the recipient must have, then the recipient must have
1572 # that filenode.
1572 # that filenode.
1573 for n in msngset:
1573 for n in msngset:
1574 clnode = cl.node(filerevlog.linkrev(n))
1574 clnode = cl.node(filerevlog.linkrev(n))
1575 if clnode in has_cl_set:
1575 if clnode in has_cl_set:
1576 hasset[n] = 1
1576 hasset[n] = 1
1577 prune_parents(filerevlog, hasset, msngset)
1577 prune_parents(filerevlog, hasset, msngset)
1578
1578
1579 # A function generator function that sets up the a context for the
1579 # A function generator function that sets up the a context for the
1580 # inner function.
1580 # inner function.
1581 def lookup_filenode_link_func(fname):
1581 def lookup_filenode_link_func(fname):
1582 msngset = msng_filenode_set[fname]
1582 msngset = msng_filenode_set[fname]
1583 # Lookup the changenode the filenode belongs to.
1583 # Lookup the changenode the filenode belongs to.
1584 def lookup_filenode_link(fnode):
1584 def lookup_filenode_link(fnode):
1585 return msngset[fnode]
1585 return msngset[fnode]
1586 return lookup_filenode_link
1586 return lookup_filenode_link
1587
1587
1588 # Now that we have all theses utility functions to help out and
1588 # Now that we have all theses utility functions to help out and
1589 # logically divide up the task, generate the group.
1589 # logically divide up the task, generate the group.
1590 def gengroup():
1590 def gengroup():
1591 # The set of changed files starts empty.
1591 # The set of changed files starts empty.
1592 changedfiles = {}
1592 changedfiles = {}
1593 # Create a changenode group generator that will call our functions
1593 # Create a changenode group generator that will call our functions
1594 # back to lookup the owning changenode and collect information.
1594 # back to lookup the owning changenode and collect information.
1595 group = cl.group(msng_cl_lst, identity,
1595 group = cl.group(msng_cl_lst, identity,
1596 manifest_and_file_collector(changedfiles))
1596 manifest_and_file_collector(changedfiles))
1597 for chnk in group:
1597 for chnk in group:
1598 yield chnk
1598 yield chnk
1599
1599
1600 # The list of manifests has been collected by the generator
1600 # The list of manifests has been collected by the generator
1601 # calling our functions back.
1601 # calling our functions back.
1602 prune_manifests()
1602 prune_manifests()
1603 msng_mnfst_lst = msng_mnfst_set.keys()
1603 msng_mnfst_lst = msng_mnfst_set.keys()
1604 # Sort the manifestnodes by revision number.
1604 # Sort the manifestnodes by revision number.
1605 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1605 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1606 # Create a generator for the manifestnodes that calls our lookup
1606 # Create a generator for the manifestnodes that calls our lookup
1607 # and data collection functions back.
1607 # and data collection functions back.
1608 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1608 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1609 filenode_collector(changedfiles))
1609 filenode_collector(changedfiles))
1610 for chnk in group:
1610 for chnk in group:
1611 yield chnk
1611 yield chnk
1612
1612
1613 # These are no longer needed, dereference and toss the memory for
1613 # These are no longer needed, dereference and toss the memory for
1614 # them.
1614 # them.
1615 msng_mnfst_lst = None
1615 msng_mnfst_lst = None
1616 msng_mnfst_set.clear()
1616 msng_mnfst_set.clear()
1617
1617
1618 changedfiles = changedfiles.keys()
1618 changedfiles = changedfiles.keys()
1619 changedfiles.sort()
1619 changedfiles.sort()
1620 # Go through all our files in order sorted by name.
1620 # Go through all our files in order sorted by name.
1621 for fname in changedfiles:
1621 for fname in changedfiles:
1622 filerevlog = self.file(fname)
1622 filerevlog = self.file(fname)
1623 # Toss out the filenodes that the recipient isn't really
1623 # Toss out the filenodes that the recipient isn't really
1624 # missing.
1624 # missing.
1625 if msng_filenode_set.has_key(fname):
1625 if msng_filenode_set.has_key(fname):
1626 prune_filenodes(fname, filerevlog)
1626 prune_filenodes(fname, filerevlog)
1627 msng_filenode_lst = msng_filenode_set[fname].keys()
1627 msng_filenode_lst = msng_filenode_set[fname].keys()
1628 else:
1628 else:
1629 msng_filenode_lst = []
1629 msng_filenode_lst = []
1630 # If any filenodes are left, generate the group for them,
1630 # If any filenodes are left, generate the group for them,
1631 # otherwise don't bother.
1631 # otherwise don't bother.
1632 if len(msng_filenode_lst) > 0:
1632 if len(msng_filenode_lst) > 0:
1633 yield changegroup.genchunk(fname)
1633 yield changegroup.genchunk(fname)
1634 # Sort the filenodes by their revision #
1634 # Sort the filenodes by their revision #
1635 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1635 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1636 # Create a group generator and only pass in a changenode
1636 # Create a group generator and only pass in a changenode
1637 # lookup function as we need to collect no information
1637 # lookup function as we need to collect no information
1638 # from filenodes.
1638 # from filenodes.
1639 group = filerevlog.group(msng_filenode_lst,
1639 group = filerevlog.group(msng_filenode_lst,
1640 lookup_filenode_link_func(fname))
1640 lookup_filenode_link_func(fname))
1641 for chnk in group:
1641 for chnk in group:
1642 yield chnk
1642 yield chnk
1643 if msng_filenode_set.has_key(fname):
1643 if msng_filenode_set.has_key(fname):
1644 # Don't need this anymore, toss it to free memory.
1644 # Don't need this anymore, toss it to free memory.
1645 del msng_filenode_set[fname]
1645 del msng_filenode_set[fname]
1646 # Signal that no more groups are left.
1646 # Signal that no more groups are left.
1647 yield changegroup.closechunk()
1647 yield changegroup.closechunk()
1648
1648
1649 if msng_cl_lst:
1649 if msng_cl_lst:
1650 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1650 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1651
1651
1652 return util.chunkbuffer(gengroup())
1652 return util.chunkbuffer(gengroup())
1653
1653
1654 def changegroup(self, basenodes, source):
1654 def changegroup(self, basenodes, source):
1655 """Generate a changegroup of all nodes that we have that a recipient
1655 """Generate a changegroup of all nodes that we have that a recipient
1656 doesn't.
1656 doesn't.
1657
1657
1658 This is much easier than the previous function as we can assume that
1658 This is much easier than the previous function as we can assume that
1659 the recipient has any changenode we aren't sending them."""
1659 the recipient has any changenode we aren't sending them."""
1660
1660
1661 self.hook('preoutgoing', throw=True, source=source)
1661 self.hook('preoutgoing', throw=True, source=source)
1662
1662
1663 cl = self.changelog
1663 cl = self.changelog
1664 nodes = cl.nodesbetween(basenodes, None)[0]
1664 nodes = cl.nodesbetween(basenodes, None)[0]
1665 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1665 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1666 self.changegroupinfo(nodes)
1666 self.changegroupinfo(nodes)
1667
1667
1668 def identity(x):
1668 def identity(x):
1669 return x
1669 return x
1670
1670
1671 def gennodelst(revlog):
1671 def gennodelst(revlog):
1672 for r in xrange(0, revlog.count()):
1672 for r in xrange(0, revlog.count()):
1673 n = revlog.node(r)
1673 n = revlog.node(r)
1674 if revlog.linkrev(n) in revset:
1674 if revlog.linkrev(n) in revset:
1675 yield n
1675 yield n
1676
1676
1677 def changed_file_collector(changedfileset):
1677 def changed_file_collector(changedfileset):
1678 def collect_changed_files(clnode):
1678 def collect_changed_files(clnode):
1679 c = cl.read(clnode)
1679 c = cl.read(clnode)
1680 for fname in c[3]:
1680 for fname in c[3]:
1681 changedfileset[fname] = 1
1681 changedfileset[fname] = 1
1682 return collect_changed_files
1682 return collect_changed_files
1683
1683
1684 def lookuprevlink_func(revlog):
1684 def lookuprevlink_func(revlog):
1685 def lookuprevlink(n):
1685 def lookuprevlink(n):
1686 return cl.node(revlog.linkrev(n))
1686 return cl.node(revlog.linkrev(n))
1687 return lookuprevlink
1687 return lookuprevlink
1688
1688
1689 def gengroup():
1689 def gengroup():
1690 # construct a list of all changed files
1690 # construct a list of all changed files
1691 changedfiles = {}
1691 changedfiles = {}
1692
1692
1693 for chnk in cl.group(nodes, identity,
1693 for chnk in cl.group(nodes, identity,
1694 changed_file_collector(changedfiles)):
1694 changed_file_collector(changedfiles)):
1695 yield chnk
1695 yield chnk
1696 changedfiles = changedfiles.keys()
1696 changedfiles = changedfiles.keys()
1697 changedfiles.sort()
1697 changedfiles.sort()
1698
1698
1699 mnfst = self.manifest
1699 mnfst = self.manifest
1700 nodeiter = gennodelst(mnfst)
1700 nodeiter = gennodelst(mnfst)
1701 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1701 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1702 yield chnk
1702 yield chnk
1703
1703
1704 for fname in changedfiles:
1704 for fname in changedfiles:
1705 filerevlog = self.file(fname)
1705 filerevlog = self.file(fname)
1706 nodeiter = gennodelst(filerevlog)
1706 nodeiter = gennodelst(filerevlog)
1707 nodeiter = list(nodeiter)
1707 nodeiter = list(nodeiter)
1708 if nodeiter:
1708 if nodeiter:
1709 yield changegroup.genchunk(fname)
1709 yield changegroup.genchunk(fname)
1710 lookup = lookuprevlink_func(filerevlog)
1710 lookup = lookuprevlink_func(filerevlog)
1711 for chnk in filerevlog.group(nodeiter, lookup):
1711 for chnk in filerevlog.group(nodeiter, lookup):
1712 yield chnk
1712 yield chnk
1713
1713
1714 yield changegroup.closechunk()
1714 yield changegroup.closechunk()
1715
1715
1716 if nodes:
1716 if nodes:
1717 self.hook('outgoing', node=hex(nodes[0]), source=source)
1717 self.hook('outgoing', node=hex(nodes[0]), source=source)
1718
1718
1719 return util.chunkbuffer(gengroup())
1719 return util.chunkbuffer(gengroup())
1720
1720
1721 def addchangegroup(self, source, srctype, url):
1721 def addchangegroup(self, source, srctype, url):
1722 """add changegroup to repo.
1722 """add changegroup to repo.
1723 returns number of heads modified or added + 1."""
1723 returns number of heads modified or added + 1."""
1724
1724
1725 def csmap(x):
1725 def csmap(x):
1726 self.ui.debug(_("add changeset %s\n") % short(x))
1726 self.ui.debug(_("add changeset %s\n") % short(x))
1727 return cl.count()
1727 return cl.count()
1728
1728
1729 def revmap(x):
1729 def revmap(x):
1730 return cl.rev(x)
1730 return cl.rev(x)
1731
1731
1732 if not source:
1732 if not source:
1733 return 0
1733 return 0
1734
1734
1735 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1735 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1736
1736
1737 changesets = files = revisions = 0
1737 changesets = files = revisions = 0
1738
1738
1739 tr = self.transaction()
1739 tr = self.transaction()
1740
1740
1741 # write changelog data to temp files so concurrent readers will not see
1741 # write changelog data to temp files so concurrent readers will not see
1742 # inconsistent view
1742 # inconsistent view
1743 cl = None
1743 cl = None
1744 try:
1744 try:
1745 cl = appendfile.appendchangelog(self.sopener,
1745 cl = appendfile.appendchangelog(self.sopener,
1746 self.changelog.version)
1746 self.changelog.version)
1747
1747
1748 oldheads = len(cl.heads())
1748 oldheads = len(cl.heads())
1749
1749
1750 # pull off the changeset group
1750 # pull off the changeset group
1751 self.ui.status(_("adding changesets\n"))
1751 self.ui.status(_("adding changesets\n"))
1752 cor = cl.count() - 1
1752 cor = cl.count() - 1
1753 chunkiter = changegroup.chunkiter(source)
1753 chunkiter = changegroup.chunkiter(source)
1754 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1754 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1755 raise util.Abort(_("received changelog group is empty"))
1755 raise util.Abort(_("received changelog group is empty"))
1756 cnr = cl.count() - 1
1756 cnr = cl.count() - 1
1757 changesets = cnr - cor
1757 changesets = cnr - cor
1758
1758
1759 # pull off the manifest group
1759 # pull off the manifest group
1760 self.ui.status(_("adding manifests\n"))
1760 self.ui.status(_("adding manifests\n"))
1761 chunkiter = changegroup.chunkiter(source)
1761 chunkiter = changegroup.chunkiter(source)
1762 # no need to check for empty manifest group here:
1762 # no need to check for empty manifest group here:
1763 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1763 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1764 # no new manifest will be created and the manifest group will
1764 # no new manifest will be created and the manifest group will
1765 # be empty during the pull
1765 # be empty during the pull
1766 self.manifest.addgroup(chunkiter, revmap, tr)
1766 self.manifest.addgroup(chunkiter, revmap, tr)
1767
1767
1768 # process the files
1768 # process the files
1769 self.ui.status(_("adding file changes\n"))
1769 self.ui.status(_("adding file changes\n"))
1770 while 1:
1770 while 1:
1771 f = changegroup.getchunk(source)
1771 f = changegroup.getchunk(source)
1772 if not f:
1772 if not f:
1773 break
1773 break
1774 self.ui.debug(_("adding %s revisions\n") % f)
1774 self.ui.debug(_("adding %s revisions\n") % f)
1775 fl = self.file(f)
1775 fl = self.file(f)
1776 o = fl.count()
1776 o = fl.count()
1777 chunkiter = changegroup.chunkiter(source)
1777 chunkiter = changegroup.chunkiter(source)
1778 if fl.addgroup(chunkiter, revmap, tr) is None:
1778 if fl.addgroup(chunkiter, revmap, tr) is None:
1779 raise util.Abort(_("received file revlog group is empty"))
1779 raise util.Abort(_("received file revlog group is empty"))
1780 revisions += fl.count() - o
1780 revisions += fl.count() - o
1781 files += 1
1781 files += 1
1782
1782
1783 cl.writedata()
1783 cl.writedata()
1784 finally:
1784 finally:
1785 if cl:
1785 if cl:
1786 cl.cleanup()
1786 cl.cleanup()
1787
1787
1788 # make changelog see real files again
1788 # make changelog see real files again
1789 self.changelog = changelog.changelog(self.sopener,
1789 self.changelog = changelog.changelog(self.sopener,
1790 self.changelog.version)
1790 self.changelog.version)
1791 self.changelog.checkinlinesize(tr)
1791 self.changelog.checkinlinesize(tr)
1792
1792
1793 newheads = len(self.changelog.heads())
1793 newheads = len(self.changelog.heads())
1794 heads = ""
1794 heads = ""
1795 if oldheads and newheads != oldheads:
1795 if oldheads and newheads != oldheads:
1796 heads = _(" (%+d heads)") % (newheads - oldheads)
1796 heads = _(" (%+d heads)") % (newheads - oldheads)
1797
1797
1798 self.ui.status(_("added %d changesets"
1798 self.ui.status(_("added %d changesets"
1799 " with %d changes to %d files%s\n")
1799 " with %d changes to %d files%s\n")
1800 % (changesets, revisions, files, heads))
1800 % (changesets, revisions, files, heads))
1801
1801
1802 if changesets > 0:
1802 if changesets > 0:
1803 self.hook('pretxnchangegroup', throw=True,
1803 self.hook('pretxnchangegroup', throw=True,
1804 node=hex(self.changelog.node(cor+1)), source=srctype,
1804 node=hex(self.changelog.node(cor+1)), source=srctype,
1805 url=url)
1805 url=url)
1806
1806
1807 tr.close()
1807 tr.close()
1808
1808
1809 if changesets > 0:
1809 if changesets > 0:
1810 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1810 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1811 source=srctype, url=url)
1811 source=srctype, url=url)
1812
1812
1813 for i in xrange(cor + 1, cnr + 1):
1813 for i in xrange(cor + 1, cnr + 1):
1814 self.hook("incoming", node=hex(self.changelog.node(i)),
1814 self.hook("incoming", node=hex(self.changelog.node(i)),
1815 source=srctype, url=url)
1815 source=srctype, url=url)
1816
1816
1817 return newheads - oldheads + 1
1817 return newheads - oldheads + 1
1818
1818
1819
1819
1820 def stream_in(self, remote):
1820 def stream_in(self, remote):
1821 fp = remote.stream_out()
1821 fp = remote.stream_out()
1822 l = fp.readline()
1822 l = fp.readline()
1823 try:
1823 try:
1824 resp = int(l)
1824 resp = int(l)
1825 except ValueError:
1825 except ValueError:
1826 raise util.UnexpectedOutput(
1826 raise util.UnexpectedOutput(
1827 _('Unexpected response from remote server:'), l)
1827 _('Unexpected response from remote server:'), l)
1828 if resp == 1:
1828 if resp == 1:
1829 raise util.Abort(_('operation forbidden by server'))
1829 raise util.Abort(_('operation forbidden by server'))
1830 elif resp == 2:
1830 elif resp == 2:
1831 raise util.Abort(_('locking the remote repository failed'))
1831 raise util.Abort(_('locking the remote repository failed'))
1832 elif resp != 0:
1832 elif resp != 0:
1833 raise util.Abort(_('the server sent an unknown error code'))
1833 raise util.Abort(_('the server sent an unknown error code'))
1834 self.ui.status(_('streaming all changes\n'))
1834 self.ui.status(_('streaming all changes\n'))
1835 l = fp.readline()
1835 l = fp.readline()
1836 try:
1836 try:
1837 total_files, total_bytes = map(int, l.split(' ', 1))
1837 total_files, total_bytes = map(int, l.split(' ', 1))
1838 except ValueError, TypeError:
1838 except ValueError, TypeError:
1839 raise util.UnexpectedOutput(
1839 raise util.UnexpectedOutput(
1840 _('Unexpected response from remote server:'), l)
1840 _('Unexpected response from remote server:'), l)
1841 self.ui.status(_('%d files to transfer, %s of data\n') %
1841 self.ui.status(_('%d files to transfer, %s of data\n') %
1842 (total_files, util.bytecount(total_bytes)))
1842 (total_files, util.bytecount(total_bytes)))
1843 start = time.time()
1843 start = time.time()
1844 for i in xrange(total_files):
1844 for i in xrange(total_files):
1845 l = fp.readline()
1845 l = fp.readline()
1846 try:
1846 try:
1847 name, size = l.split('\0', 1)
1847 name, size = l.split('\0', 1)
1848 size = int(size)
1848 size = int(size)
1849 except ValueError, TypeError:
1849 except ValueError, TypeError:
1850 raise util.UnexpectedOutput(
1850 raise util.UnexpectedOutput(
1851 _('Unexpected response from remote server:'), l)
1851 _('Unexpected response from remote server:'), l)
1852 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1852 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1853 ofp = self.sopener(name, 'w')
1853 ofp = self.sopener(name, 'w')
1854 for chunk in util.filechunkiter(fp, limit=size):
1854 for chunk in util.filechunkiter(fp, limit=size):
1855 ofp.write(chunk)
1855 ofp.write(chunk)
1856 ofp.close()
1856 ofp.close()
1857 elapsed = time.time() - start
1857 elapsed = time.time() - start
1858 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1858 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1859 (util.bytecount(total_bytes), elapsed,
1859 (util.bytecount(total_bytes), elapsed,
1860 util.bytecount(total_bytes / elapsed)))
1860 util.bytecount(total_bytes / elapsed)))
1861 self.reload()
1861 self.reload()
1862 return len(self.heads()) + 1
1862 return len(self.heads()) + 1
1863
1863
1864 def clone(self, remote, heads=[], stream=False):
1864 def clone(self, remote, heads=[], stream=False):
1865 '''clone remote repository.
1865 '''clone remote repository.
1866
1866
1867 keyword arguments:
1867 keyword arguments:
1868 heads: list of revs to clone (forces use of pull)
1868 heads: list of revs to clone (forces use of pull)
1869 stream: use streaming clone if possible'''
1869 stream: use streaming clone if possible'''
1870
1870
1871 # now, all clients that can request uncompressed clones can
1871 # now, all clients that can request uncompressed clones can
1872 # read repo formats supported by all servers that can serve
1872 # read repo formats supported by all servers that can serve
1873 # them.
1873 # them.
1874
1874
1875 # if revlog format changes, client will have to check version
1875 # if revlog format changes, client will have to check version
1876 # and format flags on "stream" capability, and use
1876 # and format flags on "stream" capability, and use
1877 # uncompressed only if compatible.
1877 # uncompressed only if compatible.
1878
1878
1879 if stream and not heads and remote.capable('stream'):
1879 if stream and not heads and remote.capable('stream'):
1880 return self.stream_in(remote)
1880 return self.stream_in(remote)
1881 return self.pull(remote, heads)
1881 return self.pull(remote, heads)
1882
1882
# Deliberately a plain closure over the path string (no reference back to
# the repository object) so transaction destructors are not kept alive by
# circular references.
def aftertrans(base):
    """Return a callable that renames base/journal* to base/undo*."""
    def rollover():
        util.rename(os.path.join(base, "journal"),
                    os.path.join(base, "undo"))
        util.rename(os.path.join(base, "journal.dirstate"),
                    os.path.join(base, "undo.dirstate"))
    return rollover
1891
1891
def instance(ui, path, create):
    """Open (or create) the local repository at *path* ('file:' allowed)."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1894
1894
def islocal(path):
    """A localrepository path is local by definition; *path* is ignored."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now