##// END OF EJS Templates
Ignore all errors while parsing the branch cache.
Alexis S. L. Carvalho
r3761:9433bdca default
parent child Browse files
Show More
@@ -1,1899 +1,1903
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 else:
40 else:
41 raise repo.RepoError(_("repository %s not found") % path)
41 raise repo.RepoError(_("repository %s not found") % path)
42 elif create:
42 elif create:
43 raise repo.RepoError(_("repository %s already exists") % path)
43 raise repo.RepoError(_("repository %s already exists") % path)
44
44
45 self.root = os.path.realpath(path)
45 self.root = os.path.realpath(path)
46 self.origroot = path
46 self.origroot = path
47 self.ui = ui.ui(parentui=parentui)
47 self.ui = ui.ui(parentui=parentui)
48 self.opener = util.opener(self.path)
48 self.opener = util.opener(self.path)
49 self.sopener = util.opener(self.path)
49 self.sopener = util.opener(self.path)
50 self.wopener = util.opener(self.root)
50 self.wopener = util.opener(self.root)
51
51
52 try:
52 try:
53 self.ui.readconfig(self.join("hgrc"), self.root)
53 self.ui.readconfig(self.join("hgrc"), self.root)
54 except IOError:
54 except IOError:
55 pass
55 pass
56
56
57 v = self.ui.configrevlog()
57 v = self.ui.configrevlog()
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 fl = v.get('flags', None)
60 fl = v.get('flags', None)
61 flags = 0
61 flags = 0
62 if fl != None:
62 if fl != None:
63 for x in fl.split():
63 for x in fl.split():
64 flags |= revlog.flagstr(x)
64 flags |= revlog.flagstr(x)
65 elif self.revlogv1:
65 elif self.revlogv1:
66 flags = revlog.REVLOG_DEFAULT_FLAGS
66 flags = revlog.REVLOG_DEFAULT_FLAGS
67
67
68 v = self.revlogversion | flags
68 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.sopener, v)
69 self.manifest = manifest.manifest(self.sopener, v)
70 self.changelog = changelog.changelog(self.sopener, v)
70 self.changelog = changelog.changelog(self.sopener, v)
71
71
72 # the changelog might not have the inline index flag
72 # the changelog might not have the inline index flag
73 # on. If the format of the changelog is the same as found in
73 # on. If the format of the changelog is the same as found in
74 # .hgrc, apply any flags found in the .hgrc as well.
74 # .hgrc, apply any flags found in the .hgrc as well.
75 # Otherwise, just use the version from the changelog
75 # Otherwise, just use the version from the changelog
76 v = self.changelog.version
76 v = self.changelog.version
77 if v == self.revlogversion:
77 if v == self.revlogversion:
78 v |= flags
78 v |= flags
79 self.revlogversion = v
79 self.revlogversion = v
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self.branchcache = None
82 self.branchcache = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.encodepats = None
84 self.encodepats = None
85 self.decodepats = None
85 self.decodepats = None
86 self.transhandle = None
86 self.transhandle = None
87
87
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
90 def url(self):
90 def url(self):
91 return 'file:' + self.root
91 return 'file:' + self.root
92
92
93 def hook(self, name, throw=False, **args):
93 def hook(self, name, throw=False, **args):
94 def callhook(hname, funcname):
94 def callhook(hname, funcname):
95 '''call python hook. hook is callable object, looked up as
95 '''call python hook. hook is callable object, looked up as
96 name in python module. if callable returns "true", hook
96 name in python module. if callable returns "true", hook
97 fails, else passes. if hook raises exception, treated as
97 fails, else passes. if hook raises exception, treated as
98 hook failure. exception propagates if throw is "true".
98 hook failure. exception propagates if throw is "true".
99
99
100 reason for "true" meaning "hook failed" is so that
100 reason for "true" meaning "hook failed" is so that
101 unmodified commands (e.g. mercurial.commands.update) can
101 unmodified commands (e.g. mercurial.commands.update) can
102 be run as hooks without wrappers to convert return values.'''
102 be run as hooks without wrappers to convert return values.'''
103
103
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 d = funcname.rfind('.')
105 d = funcname.rfind('.')
106 if d == -1:
106 if d == -1:
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 % (hname, funcname))
108 % (hname, funcname))
109 modname = funcname[:d]
109 modname = funcname[:d]
110 try:
110 try:
111 obj = __import__(modname)
111 obj = __import__(modname)
112 except ImportError:
112 except ImportError:
113 try:
113 try:
114 # extensions are loaded with hgext_ prefix
114 # extensions are loaded with hgext_ prefix
115 obj = __import__("hgext_%s" % modname)
115 obj = __import__("hgext_%s" % modname)
116 except ImportError:
116 except ImportError:
117 raise util.Abort(_('%s hook is invalid '
117 raise util.Abort(_('%s hook is invalid '
118 '(import of "%s" failed)') %
118 '(import of "%s" failed)') %
119 (hname, modname))
119 (hname, modname))
120 try:
120 try:
121 for p in funcname.split('.')[1:]:
121 for p in funcname.split('.')[1:]:
122 obj = getattr(obj, p)
122 obj = getattr(obj, p)
123 except AttributeError, err:
123 except AttributeError, err:
124 raise util.Abort(_('%s hook is invalid '
124 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not defined)') %
125 '("%s" is not defined)') %
126 (hname, funcname))
126 (hname, funcname))
127 if not callable(obj):
127 if not callable(obj):
128 raise util.Abort(_('%s hook is invalid '
128 raise util.Abort(_('%s hook is invalid '
129 '("%s" is not callable)') %
129 '("%s" is not callable)') %
130 (hname, funcname))
130 (hname, funcname))
131 try:
131 try:
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 except (KeyboardInterrupt, util.SignalInterrupt):
133 except (KeyboardInterrupt, util.SignalInterrupt):
134 raise
134 raise
135 except Exception, exc:
135 except Exception, exc:
136 if isinstance(exc, util.Abort):
136 if isinstance(exc, util.Abort):
137 self.ui.warn(_('error: %s hook failed: %s\n') %
137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 (hname, exc.args[0]))
138 (hname, exc.args[0]))
139 else:
139 else:
140 self.ui.warn(_('error: %s hook raised an exception: '
140 self.ui.warn(_('error: %s hook raised an exception: '
141 '%s\n') % (hname, exc))
141 '%s\n') % (hname, exc))
142 if throw:
142 if throw:
143 raise
143 raise
144 self.ui.print_exc()
144 self.ui.print_exc()
145 return True
145 return True
146 if r:
146 if r:
147 if throw:
147 if throw:
148 raise util.Abort(_('%s hook failed') % hname)
148 raise util.Abort(_('%s hook failed') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 return r
150 return r
151
151
152 def runhook(name, cmd):
152 def runhook(name, cmd):
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 r = util.system(cmd, environ=env, cwd=self.root)
155 r = util.system(cmd, environ=env, cwd=self.root)
156 if r:
156 if r:
157 desc, r = util.explain_exit(r)
157 desc, r = util.explain_exit(r)
158 if throw:
158 if throw:
159 raise util.Abort(_('%s hook %s') % (name, desc))
159 raise util.Abort(_('%s hook %s') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 return r
161 return r
162
162
163 r = False
163 r = False
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 if hname.split(".", 1)[0] == name and cmd]
165 if hname.split(".", 1)[0] == name and cmd]
166 hooks.sort()
166 hooks.sort()
167 for hname, cmd in hooks:
167 for hname, cmd in hooks:
168 if cmd.startswith('python:'):
168 if cmd.startswith('python:'):
169 r = callhook(hname, cmd[7:].strip()) or r
169 r = callhook(hname, cmd[7:].strip()) or r
170 else:
170 else:
171 r = runhook(hname, cmd) or r
171 r = runhook(hname, cmd) or r
172 return r
172 return r
173
173
174 tag_disallowed = ':\r\n'
174 tag_disallowed = ':\r\n'
175
175
176 def tag(self, name, node, message, local, user, date):
176 def tag(self, name, node, message, local, user, date):
177 '''tag a revision with a symbolic name.
177 '''tag a revision with a symbolic name.
178
178
179 if local is True, the tag is stored in a per-repository file.
179 if local is True, the tag is stored in a per-repository file.
180 otherwise, it is stored in the .hgtags file, and a new
180 otherwise, it is stored in the .hgtags file, and a new
181 changeset is committed with the change.
181 changeset is committed with the change.
182
182
183 keyword arguments:
183 keyword arguments:
184
184
185 local: whether to store tag in non-version-controlled file
185 local: whether to store tag in non-version-controlled file
186 (default False)
186 (default False)
187
187
188 message: commit message to use if committing
188 message: commit message to use if committing
189
189
190 user: name of user to use if committing
190 user: name of user to use if committing
191
191
192 date: date tuple to use if committing'''
192 date: date tuple to use if committing'''
193
193
194 for c in self.tag_disallowed:
194 for c in self.tag_disallowed:
195 if c in name:
195 if c in name:
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197
197
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199
199
200 if local:
200 if local:
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 self.hook('tag', node=hex(node), tag=name, local=local)
202 self.hook('tag', node=hex(node), tag=name, local=local)
203 return
203 return
204
204
205 for x in self.status()[:5]:
205 for x in self.status()[:5]:
206 if '.hgtags' in x:
206 if '.hgtags' in x:
207 raise util.Abort(_('working copy of .hgtags is changed '
207 raise util.Abort(_('working copy of .hgtags is changed '
208 '(please commit .hgtags manually)'))
208 '(please commit .hgtags manually)'))
209
209
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 if self.dirstate.state('.hgtags') == '?':
211 if self.dirstate.state('.hgtags') == '?':
212 self.add(['.hgtags'])
212 self.add(['.hgtags'])
213
213
214 self.commit(['.hgtags'], message, user, date)
214 self.commit(['.hgtags'], message, user, date)
215 self.hook('tag', node=hex(node), tag=name, local=local)
215 self.hook('tag', node=hex(node), tag=name, local=local)
216
216
217 def tags(self):
217 def tags(self):
218 '''return a mapping of tag to node'''
218 '''return a mapping of tag to node'''
219 if not self.tagscache:
219 if not self.tagscache:
220 self.tagscache = {}
220 self.tagscache = {}
221
221
222 def parsetag(line, context):
222 def parsetag(line, context):
223 if not line:
223 if not line:
224 return
224 return
225 s = l.split(" ", 1)
225 s = l.split(" ", 1)
226 if len(s) != 2:
226 if len(s) != 2:
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 return
228 return
229 node, key = s
229 node, key = s
230 key = key.strip()
230 key = key.strip()
231 try:
231 try:
232 bin_n = bin(node)
232 bin_n = bin(node)
233 except TypeError:
233 except TypeError:
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 (context, node))
235 (context, node))
236 return
236 return
237 if bin_n not in self.changelog.nodemap:
237 if bin_n not in self.changelog.nodemap:
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 (context, key))
239 (context, key))
240 return
240 return
241 self.tagscache[key] = bin_n
241 self.tagscache[key] = bin_n
242
242
243 # read the tags file from each head, ending with the tip,
243 # read the tags file from each head, ending with the tip,
244 # and add each tag found to the map, with "newer" ones
244 # and add each tag found to the map, with "newer" ones
245 # taking precedence
245 # taking precedence
246 f = None
246 f = None
247 for rev, node, fnode in self._hgtagsnodes():
247 for rev, node, fnode in self._hgtagsnodes():
248 f = (f and f.filectx(fnode) or
248 f = (f and f.filectx(fnode) or
249 self.filectx('.hgtags', fileid=fnode))
249 self.filectx('.hgtags', fileid=fnode))
250 count = 0
250 count = 0
251 for l in f.data().splitlines():
251 for l in f.data().splitlines():
252 count += 1
252 count += 1
253 parsetag(l, _("%s, line %d") % (str(f), count))
253 parsetag(l, _("%s, line %d") % (str(f), count))
254
254
255 try:
255 try:
256 f = self.opener("localtags")
256 f = self.opener("localtags")
257 count = 0
257 count = 0
258 for l in f:
258 for l in f:
259 count += 1
259 count += 1
260 parsetag(l, _("localtags, line %d") % count)
260 parsetag(l, _("localtags, line %d") % count)
261 except IOError:
261 except IOError:
262 pass
262 pass
263
263
264 self.tagscache['tip'] = self.changelog.tip()
264 self.tagscache['tip'] = self.changelog.tip()
265
265
266 return self.tagscache
266 return self.tagscache
267
267
268 def _hgtagsnodes(self):
268 def _hgtagsnodes(self):
269 heads = self.heads()
269 heads = self.heads()
270 heads.reverse()
270 heads.reverse()
271 last = {}
271 last = {}
272 ret = []
272 ret = []
273 for node in heads:
273 for node in heads:
274 c = self.changectx(node)
274 c = self.changectx(node)
275 rev = c.rev()
275 rev = c.rev()
276 try:
276 try:
277 fnode = c.filenode('.hgtags')
277 fnode = c.filenode('.hgtags')
278 except repo.LookupError:
278 except repo.LookupError:
279 continue
279 continue
280 ret.append((rev, node, fnode))
280 ret.append((rev, node, fnode))
281 if fnode in last:
281 if fnode in last:
282 ret[last[fnode]] = None
282 ret[last[fnode]] = None
283 last[fnode] = len(ret) - 1
283 last[fnode] = len(ret) - 1
284 return [item for item in ret if item]
284 return [item for item in ret if item]
285
285
286 def tagslist(self):
286 def tagslist(self):
287 '''return a list of tags ordered by revision'''
287 '''return a list of tags ordered by revision'''
288 l = []
288 l = []
289 for t, n in self.tags().items():
289 for t, n in self.tags().items():
290 try:
290 try:
291 r = self.changelog.rev(n)
291 r = self.changelog.rev(n)
292 except:
292 except:
293 r = -2 # sort to the beginning of the list if unknown
293 r = -2 # sort to the beginning of the list if unknown
294 l.append((r, t, n))
294 l.append((r, t, n))
295 l.sort()
295 l.sort()
296 return [(t, n) for r, t, n in l]
296 return [(t, n) for r, t, n in l]
297
297
298 def nodetags(self, node):
298 def nodetags(self, node):
299 '''return the tags associated with a node'''
299 '''return the tags associated with a node'''
300 if not self.nodetagscache:
300 if not self.nodetagscache:
301 self.nodetagscache = {}
301 self.nodetagscache = {}
302 for t, n in self.tags().items():
302 for t, n in self.tags().items():
303 self.nodetagscache.setdefault(n, []).append(t)
303 self.nodetagscache.setdefault(n, []).append(t)
304 return self.nodetagscache.get(node, [])
304 return self.nodetagscache.get(node, [])
305
305
306 def branchtags(self):
306 def branchtags(self):
307 if self.branchcache != None:
307 if self.branchcache != None:
308 return self.branchcache
308 return self.branchcache
309
309
310 self.branchcache = {} # avoid recursion in changectx
310 self.branchcache = {} # avoid recursion in changectx
311
311
312 partial, last, lrev = self._readbranchcache()
312 partial, last, lrev = self._readbranchcache()
313
313
314 tiprev = self.changelog.count() - 1
314 tiprev = self.changelog.count() - 1
315 if lrev != tiprev:
315 if lrev != tiprev:
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318
318
319 self.branchcache = partial
319 self.branchcache = partial
320 return self.branchcache
320 return self.branchcache
321
321
322 def _readbranchcache(self):
322 def _readbranchcache(self):
323 partial = {}
323 partial = {}
324 try:
324 try:
325 f = self.opener("branches.cache")
325 f = self.opener("branches.cache")
326 lines = f.read().split('\n')
326 lines = f.read().split('\n')
327 f.close()
327 f.close()
328 last, lrev = lines.pop(0).rstrip().split(" ", 1)
328 last, lrev = lines.pop(0).rstrip().split(" ", 1)
329 last, lrev = bin(last), int(lrev)
329 last, lrev = bin(last), int(lrev)
330 if (lrev < self.changelog.count() and
330 if not (lrev < self.changelog.count() and
331 self.changelog.node(lrev) == last): # sanity check
331 self.changelog.node(lrev) == last): # sanity check
332 # invalidate the cache
333 raise ValueError('Invalid branch cache: unknown tip')
332 for l in lines:
334 for l in lines:
333 if not l: continue
335 if not l: continue
334 node, label = l.rstrip().split(" ", 1)
336 node, label = l.rstrip().split(" ", 1)
335 partial[label] = bin(node)
337 partial[label] = bin(node)
336 else: # invalidate the cache
338 except (KeyboardInterrupt, util.SignalInterrupt):
337 last, lrev = nullid, nullrev
339 raise
338 except IOError:
340 except Exception, inst:
339 last, lrev = nullid, nullrev
341 if self.ui.debugflag:
342 self.ui.warn(str(inst), '\n')
343 partial, last, lrev = {}, nullid, nullrev
340 return partial, last, lrev
344 return partial, last, lrev
341
345
342 def _writebranchcache(self, branches, tip, tiprev):
346 def _writebranchcache(self, branches, tip, tiprev):
343 try:
347 try:
344 f = self.opener("branches.cache", "w")
348 f = self.opener("branches.cache", "w")
345 f.write("%s %s\n" % (hex(tip), tiprev))
349 f.write("%s %s\n" % (hex(tip), tiprev))
346 for label, node in branches.iteritems():
350 for label, node in branches.iteritems():
347 f.write("%s %s\n" % (hex(node), label))
351 f.write("%s %s\n" % (hex(node), label))
348 except IOError:
352 except IOError:
349 pass
353 pass
350
354
351 def _updatebranchcache(self, partial, start, end):
355 def _updatebranchcache(self, partial, start, end):
352 for r in xrange(start, end):
356 for r in xrange(start, end):
353 c = self.changectx(r)
357 c = self.changectx(r)
354 b = c.branch()
358 b = c.branch()
355 if b:
359 if b:
356 partial[b] = c.node()
360 partial[b] = c.node()
357
361
358 def lookup(self, key):
362 def lookup(self, key):
359 if key == '.':
363 if key == '.':
360 key = self.dirstate.parents()[0]
364 key = self.dirstate.parents()[0]
361 if key == nullid:
365 if key == nullid:
362 raise repo.RepoError(_("no revision checked out"))
366 raise repo.RepoError(_("no revision checked out"))
363 n = self.changelog._match(key)
367 n = self.changelog._match(key)
364 if n:
368 if n:
365 return n
369 return n
366 if key in self.tags():
370 if key in self.tags():
367 return self.tags()[key]
371 return self.tags()[key]
368 if key in self.branchtags():
372 if key in self.branchtags():
369 return self.branchtags()[key]
373 return self.branchtags()[key]
370 n = self.changelog._partialmatch(key)
374 n = self.changelog._partialmatch(key)
371 if n:
375 if n:
372 return n
376 return n
373 raise repo.RepoError(_("unknown revision '%s'") % key)
377 raise repo.RepoError(_("unknown revision '%s'") % key)
374
378
375 def dev(self):
379 def dev(self):
376 return os.lstat(self.path).st_dev
380 return os.lstat(self.path).st_dev
377
381
378 def local(self):
382 def local(self):
379 return True
383 return True
380
384
381 def join(self, f):
385 def join(self, f):
382 return os.path.join(self.path, f)
386 return os.path.join(self.path, f)
383
387
384 def sjoin(self, f):
388 def sjoin(self, f):
385 return os.path.join(self.path, f)
389 return os.path.join(self.path, f)
386
390
387 def wjoin(self, f):
391 def wjoin(self, f):
388 return os.path.join(self.root, f)
392 return os.path.join(self.root, f)
389
393
390 def file(self, f):
394 def file(self, f):
391 if f[0] == '/':
395 if f[0] == '/':
392 f = f[1:]
396 f = f[1:]
393 return filelog.filelog(self.sopener, f, self.revlogversion)
397 return filelog.filelog(self.sopener, f, self.revlogversion)
394
398
395 def changectx(self, changeid=None):
399 def changectx(self, changeid=None):
396 return context.changectx(self, changeid)
400 return context.changectx(self, changeid)
397
401
398 def workingctx(self):
402 def workingctx(self):
399 return context.workingctx(self)
403 return context.workingctx(self)
400
404
401 def parents(self, changeid=None):
405 def parents(self, changeid=None):
402 '''
406 '''
403 get list of changectxs for parents of changeid or working directory
407 get list of changectxs for parents of changeid or working directory
404 '''
408 '''
405 if changeid is None:
409 if changeid is None:
406 pl = self.dirstate.parents()
410 pl = self.dirstate.parents()
407 else:
411 else:
408 n = self.changelog.lookup(changeid)
412 n = self.changelog.lookup(changeid)
409 pl = self.changelog.parents(n)
413 pl = self.changelog.parents(n)
410 if pl[1] == nullid:
414 if pl[1] == nullid:
411 return [self.changectx(pl[0])]
415 return [self.changectx(pl[0])]
412 return [self.changectx(pl[0]), self.changectx(pl[1])]
416 return [self.changectx(pl[0]), self.changectx(pl[1])]
413
417
414 def filectx(self, path, changeid=None, fileid=None):
418 def filectx(self, path, changeid=None, fileid=None):
415 """changeid can be a changeset revision, node, or tag.
419 """changeid can be a changeset revision, node, or tag.
416 fileid can be a file revision or node."""
420 fileid can be a file revision or node."""
417 return context.filectx(self, path, changeid, fileid)
421 return context.filectx(self, path, changeid, fileid)
418
422
419 def getcwd(self):
423 def getcwd(self):
420 return self.dirstate.getcwd()
424 return self.dirstate.getcwd()
421
425
422 def wfile(self, f, mode='r'):
426 def wfile(self, f, mode='r'):
423 return self.wopener(f, mode)
427 return self.wopener(f, mode)
424
428
425 def wread(self, filename):
429 def wread(self, filename):
426 if self.encodepats == None:
430 if self.encodepats == None:
427 l = []
431 l = []
428 for pat, cmd in self.ui.configitems("encode"):
432 for pat, cmd in self.ui.configitems("encode"):
429 mf = util.matcher(self.root, "", [pat], [], [])[1]
433 mf = util.matcher(self.root, "", [pat], [], [])[1]
430 l.append((mf, cmd))
434 l.append((mf, cmd))
431 self.encodepats = l
435 self.encodepats = l
432
436
433 data = self.wopener(filename, 'r').read()
437 data = self.wopener(filename, 'r').read()
434
438
435 for mf, cmd in self.encodepats:
439 for mf, cmd in self.encodepats:
436 if mf(filename):
440 if mf(filename):
437 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
441 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
438 data = util.filter(data, cmd)
442 data = util.filter(data, cmd)
439 break
443 break
440
444
441 return data
445 return data
442
446
443 def wwrite(self, filename, data, fd=None):
447 def wwrite(self, filename, data, fd=None):
444 if self.decodepats == None:
448 if self.decodepats == None:
445 l = []
449 l = []
446 for pat, cmd in self.ui.configitems("decode"):
450 for pat, cmd in self.ui.configitems("decode"):
447 mf = util.matcher(self.root, "", [pat], [], [])[1]
451 mf = util.matcher(self.root, "", [pat], [], [])[1]
448 l.append((mf, cmd))
452 l.append((mf, cmd))
449 self.decodepats = l
453 self.decodepats = l
450
454
451 for mf, cmd in self.decodepats:
455 for mf, cmd in self.decodepats:
452 if mf(filename):
456 if mf(filename):
453 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
457 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
454 data = util.filter(data, cmd)
458 data = util.filter(data, cmd)
455 break
459 break
456
460
457 if fd:
461 if fd:
458 return fd.write(data)
462 return fd.write(data)
459 return self.wopener(filename, 'w').write(data)
463 return self.wopener(filename, 'w').write(data)
460
464
461 def transaction(self):
465 def transaction(self):
462 tr = self.transhandle
466 tr = self.transhandle
463 if tr != None and tr.running():
467 if tr != None and tr.running():
464 return tr.nest()
468 return tr.nest()
465
469
466 # save dirstate for rollback
470 # save dirstate for rollback
467 try:
471 try:
468 ds = self.opener("dirstate").read()
472 ds = self.opener("dirstate").read()
469 except IOError:
473 except IOError:
470 ds = ""
474 ds = ""
471 self.opener("journal.dirstate", "w").write(ds)
475 self.opener("journal.dirstate", "w").write(ds)
472
476
473 tr = transaction.transaction(self.ui.warn, self.sopener,
477 tr = transaction.transaction(self.ui.warn, self.sopener,
474 self.sjoin("journal"),
478 self.sjoin("journal"),
475 aftertrans(self.path))
479 aftertrans(self.path))
476 self.transhandle = tr
480 self.transhandle = tr
477 return tr
481 return tr
478
482
479 def recover(self):
483 def recover(self):
480 l = self.lock()
484 l = self.lock()
481 if os.path.exists(self.sjoin("journal")):
485 if os.path.exists(self.sjoin("journal")):
482 self.ui.status(_("rolling back interrupted transaction\n"))
486 self.ui.status(_("rolling back interrupted transaction\n"))
483 transaction.rollback(self.sopener, self.sjoin("journal"))
487 transaction.rollback(self.sopener, self.sjoin("journal"))
484 self.reload()
488 self.reload()
485 return True
489 return True
486 else:
490 else:
487 self.ui.warn(_("no interrupted transaction available\n"))
491 self.ui.warn(_("no interrupted transaction available\n"))
488 return False
492 return False
489
493
490 def rollback(self, wlock=None):
494 def rollback(self, wlock=None):
491 if not wlock:
495 if not wlock:
492 wlock = self.wlock()
496 wlock = self.wlock()
493 l = self.lock()
497 l = self.lock()
494 if os.path.exists(self.sjoin("undo")):
498 if os.path.exists(self.sjoin("undo")):
495 self.ui.status(_("rolling back last transaction\n"))
499 self.ui.status(_("rolling back last transaction\n"))
496 transaction.rollback(self.sopener, self.sjoin("undo"))
500 transaction.rollback(self.sopener, self.sjoin("undo"))
497 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
501 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
498 self.reload()
502 self.reload()
499 self.wreload()
503 self.wreload()
500 else:
504 else:
501 self.ui.warn(_("no rollback information available\n"))
505 self.ui.warn(_("no rollback information available\n"))
502
506
503 def wreload(self):
507 def wreload(self):
504 self.dirstate.read()
508 self.dirstate.read()
505
509
506 def reload(self):
510 def reload(self):
507 self.changelog.load()
511 self.changelog.load()
508 self.manifest.load()
512 self.manifest.load()
509 self.tagscache = None
513 self.tagscache = None
510 self.nodetagscache = None
514 self.nodetagscache = None
511
515
512 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
516 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
513 desc=None):
517 desc=None):
514 try:
518 try:
515 l = lock.lock(lockname, 0, releasefn, desc=desc)
519 l = lock.lock(lockname, 0, releasefn, desc=desc)
516 except lock.LockHeld, inst:
520 except lock.LockHeld, inst:
517 if not wait:
521 if not wait:
518 raise
522 raise
519 self.ui.warn(_("waiting for lock on %s held by %r\n") %
523 self.ui.warn(_("waiting for lock on %s held by %r\n") %
520 (desc, inst.locker))
524 (desc, inst.locker))
521 # default to 600 seconds timeout
525 # default to 600 seconds timeout
522 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
526 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
523 releasefn, desc=desc)
527 releasefn, desc=desc)
524 if acquirefn:
528 if acquirefn:
525 acquirefn()
529 acquirefn()
526 return l
530 return l
527
531
528 def lock(self, wait=1):
532 def lock(self, wait=1):
529 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
533 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
530 desc=_('repository %s') % self.origroot)
534 desc=_('repository %s') % self.origroot)
531
535
532 def wlock(self, wait=1):
536 def wlock(self, wait=1):
533 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
537 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
534 self.wreload,
538 self.wreload,
535 desc=_('working directory of %s') % self.origroot)
539 desc=_('working directory of %s') % self.origroot)
536
540
537 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
541 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
538 """
542 """
539 commit an individual file as part of a larger transaction
543 commit an individual file as part of a larger transaction
540 """
544 """
541
545
542 t = self.wread(fn)
546 t = self.wread(fn)
543 fl = self.file(fn)
547 fl = self.file(fn)
544 fp1 = manifest1.get(fn, nullid)
548 fp1 = manifest1.get(fn, nullid)
545 fp2 = manifest2.get(fn, nullid)
549 fp2 = manifest2.get(fn, nullid)
546
550
547 meta = {}
551 meta = {}
548 cp = self.dirstate.copied(fn)
552 cp = self.dirstate.copied(fn)
549 if cp:
553 if cp:
550 meta["copy"] = cp
554 meta["copy"] = cp
551 if not manifest2: # not a branch merge
555 if not manifest2: # not a branch merge
552 meta["copyrev"] = hex(manifest1.get(cp, nullid))
556 meta["copyrev"] = hex(manifest1.get(cp, nullid))
553 fp2 = nullid
557 fp2 = nullid
554 elif fp2 != nullid: # copied on remote side
558 elif fp2 != nullid: # copied on remote side
555 meta["copyrev"] = hex(manifest1.get(cp, nullid))
559 meta["copyrev"] = hex(manifest1.get(cp, nullid))
556 elif fp1 != nullid: # copied on local side, reversed
560 elif fp1 != nullid: # copied on local side, reversed
557 meta["copyrev"] = hex(manifest2.get(cp))
561 meta["copyrev"] = hex(manifest2.get(cp))
558 fp2 = nullid
562 fp2 = nullid
559 else: # directory rename
563 else: # directory rename
560 meta["copyrev"] = hex(manifest1.get(cp, nullid))
564 meta["copyrev"] = hex(manifest1.get(cp, nullid))
561 self.ui.debug(_(" %s: copy %s:%s\n") %
565 self.ui.debug(_(" %s: copy %s:%s\n") %
562 (fn, cp, meta["copyrev"]))
566 (fn, cp, meta["copyrev"]))
563 fp1 = nullid
567 fp1 = nullid
564 elif fp2 != nullid:
568 elif fp2 != nullid:
565 # is one parent an ancestor of the other?
569 # is one parent an ancestor of the other?
566 fpa = fl.ancestor(fp1, fp2)
570 fpa = fl.ancestor(fp1, fp2)
567 if fpa == fp1:
571 if fpa == fp1:
568 fp1, fp2 = fp2, nullid
572 fp1, fp2 = fp2, nullid
569 elif fpa == fp2:
573 elif fpa == fp2:
570 fp2 = nullid
574 fp2 = nullid
571
575
572 # is the file unmodified from the parent? report existing entry
576 # is the file unmodified from the parent? report existing entry
573 if fp2 == nullid and not fl.cmp(fp1, t):
577 if fp2 == nullid and not fl.cmp(fp1, t):
574 return fp1
578 return fp1
575
579
576 changelist.append(fn)
580 changelist.append(fn)
577 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
581 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
578
582
579 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
583 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
580 if p1 is None:
584 if p1 is None:
581 p1, p2 = self.dirstate.parents()
585 p1, p2 = self.dirstate.parents()
582 return self.commit(files=files, text=text, user=user, date=date,
586 return self.commit(files=files, text=text, user=user, date=date,
583 p1=p1, p2=p2, wlock=wlock)
587 p1=p1, p2=p2, wlock=wlock)
584
588
585 def commit(self, files=None, text="", user=None, date=None,
589 def commit(self, files=None, text="", user=None, date=None,
586 match=util.always, force=False, lock=None, wlock=None,
590 match=util.always, force=False, lock=None, wlock=None,
587 force_editor=False, p1=None, p2=None, extra={}):
591 force_editor=False, p1=None, p2=None, extra={}):
588
592
589 commit = []
593 commit = []
590 remove = []
594 remove = []
591 changed = []
595 changed = []
592 use_dirstate = (p1 is None) # not rawcommit
596 use_dirstate = (p1 is None) # not rawcommit
593 extra = extra.copy()
597 extra = extra.copy()
594
598
595 if use_dirstate:
599 if use_dirstate:
596 if files:
600 if files:
597 for f in files:
601 for f in files:
598 s = self.dirstate.state(f)
602 s = self.dirstate.state(f)
599 if s in 'nmai':
603 if s in 'nmai':
600 commit.append(f)
604 commit.append(f)
601 elif s == 'r':
605 elif s == 'r':
602 remove.append(f)
606 remove.append(f)
603 else:
607 else:
604 self.ui.warn(_("%s not tracked!\n") % f)
608 self.ui.warn(_("%s not tracked!\n") % f)
605 else:
609 else:
606 changes = self.status(match=match)[:5]
610 changes = self.status(match=match)[:5]
607 modified, added, removed, deleted, unknown = changes
611 modified, added, removed, deleted, unknown = changes
608 commit = modified + added
612 commit = modified + added
609 remove = removed
613 remove = removed
610 else:
614 else:
611 commit = files
615 commit = files
612
616
613 if use_dirstate:
617 if use_dirstate:
614 p1, p2 = self.dirstate.parents()
618 p1, p2 = self.dirstate.parents()
615 update_dirstate = True
619 update_dirstate = True
616 else:
620 else:
617 p1, p2 = p1, p2 or nullid
621 p1, p2 = p1, p2 or nullid
618 update_dirstate = (self.dirstate.parents()[0] == p1)
622 update_dirstate = (self.dirstate.parents()[0] == p1)
619
623
620 c1 = self.changelog.read(p1)
624 c1 = self.changelog.read(p1)
621 c2 = self.changelog.read(p2)
625 c2 = self.changelog.read(p2)
622 m1 = self.manifest.read(c1[0]).copy()
626 m1 = self.manifest.read(c1[0]).copy()
623 m2 = self.manifest.read(c2[0])
627 m2 = self.manifest.read(c2[0])
624
628
625 if use_dirstate:
629 if use_dirstate:
626 branchname = self.workingctx().branch()
630 branchname = self.workingctx().branch()
627 else:
631 else:
628 branchname = ""
632 branchname = ""
629
633
630 if use_dirstate:
634 if use_dirstate:
631 oldname = c1[5].get("branch", "")
635 oldname = c1[5].get("branch", "")
632 if not commit and not remove and not force and p2 == nullid and \
636 if not commit and not remove and not force and p2 == nullid and \
633 branchname == oldname:
637 branchname == oldname:
634 self.ui.status(_("nothing changed\n"))
638 self.ui.status(_("nothing changed\n"))
635 return None
639 return None
636
640
637 xp1 = hex(p1)
641 xp1 = hex(p1)
638 if p2 == nullid: xp2 = ''
642 if p2 == nullid: xp2 = ''
639 else: xp2 = hex(p2)
643 else: xp2 = hex(p2)
640
644
641 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
645 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
642
646
643 if not wlock:
647 if not wlock:
644 wlock = self.wlock()
648 wlock = self.wlock()
645 if not lock:
649 if not lock:
646 lock = self.lock()
650 lock = self.lock()
647 tr = self.transaction()
651 tr = self.transaction()
648
652
649 # check in files
653 # check in files
650 new = {}
654 new = {}
651 linkrev = self.changelog.count()
655 linkrev = self.changelog.count()
652 commit.sort()
656 commit.sort()
653 for f in commit:
657 for f in commit:
654 self.ui.note(f + "\n")
658 self.ui.note(f + "\n")
655 try:
659 try:
656 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
660 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
657 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
661 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
658 except IOError:
662 except IOError:
659 if use_dirstate:
663 if use_dirstate:
660 self.ui.warn(_("trouble committing %s!\n") % f)
664 self.ui.warn(_("trouble committing %s!\n") % f)
661 raise
665 raise
662 else:
666 else:
663 remove.append(f)
667 remove.append(f)
664
668
665 # update manifest
669 # update manifest
666 m1.update(new)
670 m1.update(new)
667 remove.sort()
671 remove.sort()
668
672
669 for f in remove:
673 for f in remove:
670 if f in m1:
674 if f in m1:
671 del m1[f]
675 del m1[f]
672 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
676 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
673
677
674 # add changeset
678 # add changeset
675 new = new.keys()
679 new = new.keys()
676 new.sort()
680 new.sort()
677
681
678 user = user or self.ui.username()
682 user = user or self.ui.username()
679 if not text or force_editor:
683 if not text or force_editor:
680 edittext = []
684 edittext = []
681 if text:
685 if text:
682 edittext.append(text)
686 edittext.append(text)
683 edittext.append("")
687 edittext.append("")
684 edittext.append("HG: user: %s" % user)
688 edittext.append("HG: user: %s" % user)
685 if p2 != nullid:
689 if p2 != nullid:
686 edittext.append("HG: branch merge")
690 edittext.append("HG: branch merge")
687 edittext.extend(["HG: changed %s" % f for f in changed])
691 edittext.extend(["HG: changed %s" % f for f in changed])
688 edittext.extend(["HG: removed %s" % f for f in remove])
692 edittext.extend(["HG: removed %s" % f for f in remove])
689 if not changed and not remove:
693 if not changed and not remove:
690 edittext.append("HG: no files changed")
694 edittext.append("HG: no files changed")
691 edittext.append("")
695 edittext.append("")
692 # run editor in the repository root
696 # run editor in the repository root
693 olddir = os.getcwd()
697 olddir = os.getcwd()
694 os.chdir(self.root)
698 os.chdir(self.root)
695 text = self.ui.edit("\n".join(edittext), user)
699 text = self.ui.edit("\n".join(edittext), user)
696 os.chdir(olddir)
700 os.chdir(olddir)
697
701
698 lines = [line.rstrip() for line in text.rstrip().splitlines()]
702 lines = [line.rstrip() for line in text.rstrip().splitlines()]
699 while lines and not lines[0]:
703 while lines and not lines[0]:
700 del lines[0]
704 del lines[0]
701 if not lines:
705 if not lines:
702 return None
706 return None
703 text = '\n'.join(lines)
707 text = '\n'.join(lines)
704 if branchname:
708 if branchname:
705 extra["branch"] = branchname
709 extra["branch"] = branchname
706 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
710 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
707 user, date, extra)
711 user, date, extra)
708 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
712 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
709 parent2=xp2)
713 parent2=xp2)
710 tr.close()
714 tr.close()
711
715
712 if use_dirstate or update_dirstate:
716 if use_dirstate or update_dirstate:
713 self.dirstate.setparents(n)
717 self.dirstate.setparents(n)
714 if use_dirstate:
718 if use_dirstate:
715 self.dirstate.update(new, "n")
719 self.dirstate.update(new, "n")
716 self.dirstate.forget(remove)
720 self.dirstate.forget(remove)
717
721
718 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
722 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
719 return n
723 return n
720
724
721 def walk(self, node=None, files=[], match=util.always, badmatch=None):
725 def walk(self, node=None, files=[], match=util.always, badmatch=None):
722 '''
726 '''
723 walk recursively through the directory tree or a given
727 walk recursively through the directory tree or a given
724 changeset, finding all files matched by the match
728 changeset, finding all files matched by the match
725 function
729 function
726
730
727 results are yielded in a tuple (src, filename), where src
731 results are yielded in a tuple (src, filename), where src
728 is one of:
732 is one of:
729 'f' the file was found in the directory tree
733 'f' the file was found in the directory tree
730 'm' the file was only in the dirstate and not in the tree
734 'm' the file was only in the dirstate and not in the tree
731 'b' file was not found and matched badmatch
735 'b' file was not found and matched badmatch
732 '''
736 '''
733
737
734 if node:
738 if node:
735 fdict = dict.fromkeys(files)
739 fdict = dict.fromkeys(files)
736 for fn in self.manifest.read(self.changelog.read(node)[0]):
740 for fn in self.manifest.read(self.changelog.read(node)[0]):
737 for ffn in fdict:
741 for ffn in fdict:
738 # match if the file is the exact name or a directory
742 # match if the file is the exact name or a directory
739 if ffn == fn or fn.startswith("%s/" % ffn):
743 if ffn == fn or fn.startswith("%s/" % ffn):
740 del fdict[ffn]
744 del fdict[ffn]
741 break
745 break
742 if match(fn):
746 if match(fn):
743 yield 'm', fn
747 yield 'm', fn
744 for fn in fdict:
748 for fn in fdict:
745 if badmatch and badmatch(fn):
749 if badmatch and badmatch(fn):
746 if match(fn):
750 if match(fn):
747 yield 'b', fn
751 yield 'b', fn
748 else:
752 else:
749 self.ui.warn(_('%s: No such file in rev %s\n') % (
753 self.ui.warn(_('%s: No such file in rev %s\n') % (
750 util.pathto(self.getcwd(), fn), short(node)))
754 util.pathto(self.getcwd(), fn), short(node)))
751 else:
755 else:
752 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
756 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
753 yield src, fn
757 yield src, fn
754
758
755 def status(self, node1=None, node2=None, files=[], match=util.always,
759 def status(self, node1=None, node2=None, files=[], match=util.always,
756 wlock=None, list_ignored=False, list_clean=False):
760 wlock=None, list_ignored=False, list_clean=False):
757 """return status of files between two nodes or node and working directory
761 """return status of files between two nodes or node and working directory
758
762
759 If node1 is None, use the first dirstate parent instead.
763 If node1 is None, use the first dirstate parent instead.
760 If node2 is None, compare node1 with working directory.
764 If node2 is None, compare node1 with working directory.
761 """
765 """
762
766
763 def fcmp(fn, mf):
767 def fcmp(fn, mf):
764 t1 = self.wread(fn)
768 t1 = self.wread(fn)
765 return self.file(fn).cmp(mf.get(fn, nullid), t1)
769 return self.file(fn).cmp(mf.get(fn, nullid), t1)
766
770
767 def mfmatches(node):
771 def mfmatches(node):
768 change = self.changelog.read(node)
772 change = self.changelog.read(node)
769 mf = self.manifest.read(change[0]).copy()
773 mf = self.manifest.read(change[0]).copy()
770 for fn in mf.keys():
774 for fn in mf.keys():
771 if not match(fn):
775 if not match(fn):
772 del mf[fn]
776 del mf[fn]
773 return mf
777 return mf
774
778
775 modified, added, removed, deleted, unknown = [], [], [], [], []
779 modified, added, removed, deleted, unknown = [], [], [], [], []
776 ignored, clean = [], []
780 ignored, clean = [], []
777
781
778 compareworking = False
782 compareworking = False
779 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
783 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
780 compareworking = True
784 compareworking = True
781
785
782 if not compareworking:
786 if not compareworking:
783 # read the manifest from node1 before the manifest from node2,
787 # read the manifest from node1 before the manifest from node2,
784 # so that we'll hit the manifest cache if we're going through
788 # so that we'll hit the manifest cache if we're going through
785 # all the revisions in parent->child order.
789 # all the revisions in parent->child order.
786 mf1 = mfmatches(node1)
790 mf1 = mfmatches(node1)
787
791
788 # are we comparing the working directory?
792 # are we comparing the working directory?
789 if not node2:
793 if not node2:
790 if not wlock:
794 if not wlock:
791 try:
795 try:
792 wlock = self.wlock(wait=0)
796 wlock = self.wlock(wait=0)
793 except lock.LockException:
797 except lock.LockException:
794 wlock = None
798 wlock = None
795 (lookup, modified, added, removed, deleted, unknown,
799 (lookup, modified, added, removed, deleted, unknown,
796 ignored, clean) = self.dirstate.status(files, match,
800 ignored, clean) = self.dirstate.status(files, match,
797 list_ignored, list_clean)
801 list_ignored, list_clean)
798
802
799 # are we comparing working dir against its parent?
803 # are we comparing working dir against its parent?
800 if compareworking:
804 if compareworking:
801 if lookup:
805 if lookup:
802 # do a full compare of any files that might have changed
806 # do a full compare of any files that might have changed
803 mf2 = mfmatches(self.dirstate.parents()[0])
807 mf2 = mfmatches(self.dirstate.parents()[0])
804 for f in lookup:
808 for f in lookup:
805 if fcmp(f, mf2):
809 if fcmp(f, mf2):
806 modified.append(f)
810 modified.append(f)
807 else:
811 else:
808 clean.append(f)
812 clean.append(f)
809 if wlock is not None:
813 if wlock is not None:
810 self.dirstate.update([f], "n")
814 self.dirstate.update([f], "n")
811 else:
815 else:
812 # we are comparing working dir against non-parent
816 # we are comparing working dir against non-parent
813 # generate a pseudo-manifest for the working dir
817 # generate a pseudo-manifest for the working dir
814 # XXX: create it in dirstate.py ?
818 # XXX: create it in dirstate.py ?
815 mf2 = mfmatches(self.dirstate.parents()[0])
819 mf2 = mfmatches(self.dirstate.parents()[0])
816 for f in lookup + modified + added:
820 for f in lookup + modified + added:
817 mf2[f] = ""
821 mf2[f] = ""
818 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
822 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
819 for f in removed:
823 for f in removed:
820 if f in mf2:
824 if f in mf2:
821 del mf2[f]
825 del mf2[f]
822 else:
826 else:
823 # we are comparing two revisions
827 # we are comparing two revisions
824 mf2 = mfmatches(node2)
828 mf2 = mfmatches(node2)
825
829
826 if not compareworking:
830 if not compareworking:
827 # flush lists from dirstate before comparing manifests
831 # flush lists from dirstate before comparing manifests
828 modified, added, clean = [], [], []
832 modified, added, clean = [], [], []
829
833
830 # make sure to sort the files so we talk to the disk in a
834 # make sure to sort the files so we talk to the disk in a
831 # reasonable order
835 # reasonable order
832 mf2keys = mf2.keys()
836 mf2keys = mf2.keys()
833 mf2keys.sort()
837 mf2keys.sort()
834 for fn in mf2keys:
838 for fn in mf2keys:
835 if mf1.has_key(fn):
839 if mf1.has_key(fn):
836 if mf1.flags(fn) != mf2.flags(fn) or \
840 if mf1.flags(fn) != mf2.flags(fn) or \
837 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
841 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
838 modified.append(fn)
842 modified.append(fn)
839 elif list_clean:
843 elif list_clean:
840 clean.append(fn)
844 clean.append(fn)
841 del mf1[fn]
845 del mf1[fn]
842 else:
846 else:
843 added.append(fn)
847 added.append(fn)
844
848
845 removed = mf1.keys()
849 removed = mf1.keys()
846
850
847 # sort and return results:
851 # sort and return results:
848 for l in modified, added, removed, deleted, unknown, ignored, clean:
852 for l in modified, added, removed, deleted, unknown, ignored, clean:
849 l.sort()
853 l.sort()
850 return (modified, added, removed, deleted, unknown, ignored, clean)
854 return (modified, added, removed, deleted, unknown, ignored, clean)
851
855
852 def add(self, list, wlock=None):
856 def add(self, list, wlock=None):
853 if not wlock:
857 if not wlock:
854 wlock = self.wlock()
858 wlock = self.wlock()
855 for f in list:
859 for f in list:
856 p = self.wjoin(f)
860 p = self.wjoin(f)
857 if not os.path.exists(p):
861 if not os.path.exists(p):
858 self.ui.warn(_("%s does not exist!\n") % f)
862 self.ui.warn(_("%s does not exist!\n") % f)
859 elif not os.path.isfile(p):
863 elif not os.path.isfile(p):
860 self.ui.warn(_("%s not added: only files supported currently\n")
864 self.ui.warn(_("%s not added: only files supported currently\n")
861 % f)
865 % f)
862 elif self.dirstate.state(f) in 'an':
866 elif self.dirstate.state(f) in 'an':
863 self.ui.warn(_("%s already tracked!\n") % f)
867 self.ui.warn(_("%s already tracked!\n") % f)
864 else:
868 else:
865 self.dirstate.update([f], "a")
869 self.dirstate.update([f], "a")
866
870
867 def forget(self, list, wlock=None):
871 def forget(self, list, wlock=None):
868 if not wlock:
872 if not wlock:
869 wlock = self.wlock()
873 wlock = self.wlock()
870 for f in list:
874 for f in list:
871 if self.dirstate.state(f) not in 'ai':
875 if self.dirstate.state(f) not in 'ai':
872 self.ui.warn(_("%s not added!\n") % f)
876 self.ui.warn(_("%s not added!\n") % f)
873 else:
877 else:
874 self.dirstate.forget([f])
878 self.dirstate.forget([f])
875
879
876 def remove(self, list, unlink=False, wlock=None):
880 def remove(self, list, unlink=False, wlock=None):
877 if unlink:
881 if unlink:
878 for f in list:
882 for f in list:
879 try:
883 try:
880 util.unlink(self.wjoin(f))
884 util.unlink(self.wjoin(f))
881 except OSError, inst:
885 except OSError, inst:
882 if inst.errno != errno.ENOENT:
886 if inst.errno != errno.ENOENT:
883 raise
887 raise
884 if not wlock:
888 if not wlock:
885 wlock = self.wlock()
889 wlock = self.wlock()
886 for f in list:
890 for f in list:
887 p = self.wjoin(f)
891 p = self.wjoin(f)
888 if os.path.exists(p):
892 if os.path.exists(p):
889 self.ui.warn(_("%s still exists!\n") % f)
893 self.ui.warn(_("%s still exists!\n") % f)
890 elif self.dirstate.state(f) == 'a':
894 elif self.dirstate.state(f) == 'a':
891 self.dirstate.forget([f])
895 self.dirstate.forget([f])
892 elif f not in self.dirstate:
896 elif f not in self.dirstate:
893 self.ui.warn(_("%s not tracked!\n") % f)
897 self.ui.warn(_("%s not tracked!\n") % f)
894 else:
898 else:
895 self.dirstate.update([f], "r")
899 self.dirstate.update([f], "r")
896
900
897 def undelete(self, list, wlock=None):
901 def undelete(self, list, wlock=None):
898 p = self.dirstate.parents()[0]
902 p = self.dirstate.parents()[0]
899 mn = self.changelog.read(p)[0]
903 mn = self.changelog.read(p)[0]
900 m = self.manifest.read(mn)
904 m = self.manifest.read(mn)
901 if not wlock:
905 if not wlock:
902 wlock = self.wlock()
906 wlock = self.wlock()
903 for f in list:
907 for f in list:
904 if self.dirstate.state(f) not in "r":
908 if self.dirstate.state(f) not in "r":
905 self.ui.warn("%s not removed!\n" % f)
909 self.ui.warn("%s not removed!\n" % f)
906 else:
910 else:
907 t = self.file(f).read(m[f])
911 t = self.file(f).read(m[f])
908 self.wwrite(f, t)
912 self.wwrite(f, t)
909 util.set_exec(self.wjoin(f), m.execf(f))
913 util.set_exec(self.wjoin(f), m.execf(f))
910 self.dirstate.update([f], "n")
914 self.dirstate.update([f], "n")
911
915
912 def copy(self, source, dest, wlock=None):
916 def copy(self, source, dest, wlock=None):
913 p = self.wjoin(dest)
917 p = self.wjoin(dest)
914 if not os.path.exists(p):
918 if not os.path.exists(p):
915 self.ui.warn(_("%s does not exist!\n") % dest)
919 self.ui.warn(_("%s does not exist!\n") % dest)
916 elif not os.path.isfile(p):
920 elif not os.path.isfile(p):
917 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
921 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
918 else:
922 else:
919 if not wlock:
923 if not wlock:
920 wlock = self.wlock()
924 wlock = self.wlock()
921 if self.dirstate.state(dest) == '?':
925 if self.dirstate.state(dest) == '?':
922 self.dirstate.update([dest], "a")
926 self.dirstate.update([dest], "a")
923 self.dirstate.copy(source, dest)
927 self.dirstate.copy(source, dest)
924
928
925 def heads(self, start=None):
929 def heads(self, start=None):
926 heads = self.changelog.heads(start)
930 heads = self.changelog.heads(start)
927 # sort the output in rev descending order
931 # sort the output in rev descending order
928 heads = [(-self.changelog.rev(h), h) for h in heads]
932 heads = [(-self.changelog.rev(h), h) for h in heads]
929 heads.sort()
933 heads.sort()
930 return [n for (r, n) in heads]
934 return [n for (r, n) in heads]
931
935
932 # branchlookup returns a dict giving a list of branches for
936 # branchlookup returns a dict giving a list of branches for
933 # each head. A branch is defined as the tag of a node or
937 # each head. A branch is defined as the tag of a node or
934 # the branch of the node's parents. If a node has multiple
938 # the branch of the node's parents. If a node has multiple
935 # branch tags, tags are eliminated if they are visible from other
939 # branch tags, tags are eliminated if they are visible from other
936 # branch tags.
940 # branch tags.
937 #
941 #
938 # So, for this graph: a->b->c->d->e
942 # So, for this graph: a->b->c->d->e
939 # \ /
943 # \ /
940 # aa -----/
944 # aa -----/
941 # a has tag 2.6.12
945 # a has tag 2.6.12
942 # d has tag 2.6.13
946 # d has tag 2.6.13
943 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
947 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
944 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
948 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
945 # from the list.
949 # from the list.
946 #
950 #
947 # It is possible that more than one head will have the same branch tag.
951 # It is possible that more than one head will have the same branch tag.
948 # callers need to check the result for multiple heads under the same
952 # callers need to check the result for multiple heads under the same
949 # branch tag if that is a problem for them (ie checkout of a specific
953 # branch tag if that is a problem for them (ie checkout of a specific
950 # branch).
954 # branch).
951 #
955 #
952 # passing in a specific branch will limit the depth of the search
956 # passing in a specific branch will limit the depth of the search
953 # through the parents. It won't limit the branches returned in the
957 # through the parents. It won't limit the branches returned in the
954 # result though.
958 # result though.
955 def branchlookup(self, heads=None, branch=None):
959 def branchlookup(self, heads=None, branch=None):
956 if not heads:
960 if not heads:
957 heads = self.heads()
961 heads = self.heads()
958 headt = [ h for h in heads ]
962 headt = [ h for h in heads ]
959 chlog = self.changelog
963 chlog = self.changelog
960 branches = {}
964 branches = {}
961 merges = []
965 merges = []
962 seenmerge = {}
966 seenmerge = {}
963
967
964 # traverse the tree once for each head, recording in the branches
968 # traverse the tree once for each head, recording in the branches
965 # dict which tags are visible from this head. The branches
969 # dict which tags are visible from this head. The branches
966 # dict also records which tags are visible from each tag
970 # dict also records which tags are visible from each tag
967 # while we traverse.
971 # while we traverse.
968 while headt or merges:
972 while headt or merges:
969 if merges:
973 if merges:
970 n, found = merges.pop()
974 n, found = merges.pop()
971 visit = [n]
975 visit = [n]
972 else:
976 else:
973 h = headt.pop()
977 h = headt.pop()
974 visit = [h]
978 visit = [h]
975 found = [h]
979 found = [h]
976 seen = {}
980 seen = {}
977 while visit:
981 while visit:
978 n = visit.pop()
982 n = visit.pop()
979 if n in seen:
983 if n in seen:
980 continue
984 continue
981 pp = chlog.parents(n)
985 pp = chlog.parents(n)
982 tags = self.nodetags(n)
986 tags = self.nodetags(n)
983 if tags:
987 if tags:
984 for x in tags:
988 for x in tags:
985 if x == 'tip':
989 if x == 'tip':
986 continue
990 continue
987 for f in found:
991 for f in found:
988 branches.setdefault(f, {})[n] = 1
992 branches.setdefault(f, {})[n] = 1
989 branches.setdefault(n, {})[n] = 1
993 branches.setdefault(n, {})[n] = 1
990 break
994 break
991 if n not in found:
995 if n not in found:
992 found.append(n)
996 found.append(n)
993 if branch in tags:
997 if branch in tags:
994 continue
998 continue
995 seen[n] = 1
999 seen[n] = 1
996 if pp[1] != nullid and n not in seenmerge:
1000 if pp[1] != nullid and n not in seenmerge:
997 merges.append((pp[1], [x for x in found]))
1001 merges.append((pp[1], [x for x in found]))
998 seenmerge[n] = 1
1002 seenmerge[n] = 1
999 if pp[0] != nullid:
1003 if pp[0] != nullid:
1000 visit.append(pp[0])
1004 visit.append(pp[0])
1001 # traverse the branches dict, eliminating branch tags from each
1005 # traverse the branches dict, eliminating branch tags from each
1002 # head that are visible from another branch tag for that head.
1006 # head that are visible from another branch tag for that head.
1003 out = {}
1007 out = {}
1004 viscache = {}
1008 viscache = {}
1005 for h in heads:
1009 for h in heads:
1006 def visible(node):
1010 def visible(node):
1007 if node in viscache:
1011 if node in viscache:
1008 return viscache[node]
1012 return viscache[node]
1009 ret = {}
1013 ret = {}
1010 visit = [node]
1014 visit = [node]
1011 while visit:
1015 while visit:
1012 x = visit.pop()
1016 x = visit.pop()
1013 if x in viscache:
1017 if x in viscache:
1014 ret.update(viscache[x])
1018 ret.update(viscache[x])
1015 elif x not in ret:
1019 elif x not in ret:
1016 ret[x] = 1
1020 ret[x] = 1
1017 if x in branches:
1021 if x in branches:
1018 visit[len(visit):] = branches[x].keys()
1022 visit[len(visit):] = branches[x].keys()
1019 viscache[node] = ret
1023 viscache[node] = ret
1020 return ret
1024 return ret
1021 if h not in branches:
1025 if h not in branches:
1022 continue
1026 continue
1023 # O(n^2), but somewhat limited. This only searches the
1027 # O(n^2), but somewhat limited. This only searches the
1024 # tags visible from a specific head, not all the tags in the
1028 # tags visible from a specific head, not all the tags in the
1025 # whole repo.
1029 # whole repo.
1026 for b in branches[h]:
1030 for b in branches[h]:
1027 vis = False
1031 vis = False
1028 for bb in branches[h].keys():
1032 for bb in branches[h].keys():
1029 if b != bb:
1033 if b != bb:
1030 if b in visible(bb):
1034 if b in visible(bb):
1031 vis = True
1035 vis = True
1032 break
1036 break
1033 if not vis:
1037 if not vis:
1034 l = out.setdefault(h, [])
1038 l = out.setdefault(h, [])
1035 l[len(l):] = self.nodetags(b)
1039 l[len(l):] = self.nodetags(b)
1036 return out
1040 return out
1037
1041
1038 def branches(self, nodes):
1042 def branches(self, nodes):
1039 if not nodes:
1043 if not nodes:
1040 nodes = [self.changelog.tip()]
1044 nodes = [self.changelog.tip()]
1041 b = []
1045 b = []
1042 for n in nodes:
1046 for n in nodes:
1043 t = n
1047 t = n
1044 while 1:
1048 while 1:
1045 p = self.changelog.parents(n)
1049 p = self.changelog.parents(n)
1046 if p[1] != nullid or p[0] == nullid:
1050 if p[1] != nullid or p[0] == nullid:
1047 b.append((t, n, p[0], p[1]))
1051 b.append((t, n, p[0], p[1]))
1048 break
1052 break
1049 n = p[0]
1053 n = p[0]
1050 return b
1054 return b
1051
1055
1052 def between(self, pairs):
1056 def between(self, pairs):
1053 r = []
1057 r = []
1054
1058
1055 for top, bottom in pairs:
1059 for top, bottom in pairs:
1056 n, l, i = top, [], 0
1060 n, l, i = top, [], 0
1057 f = 1
1061 f = 1
1058
1062
1059 while n != bottom:
1063 while n != bottom:
1060 p = self.changelog.parents(n)[0]
1064 p = self.changelog.parents(n)[0]
1061 if i == f:
1065 if i == f:
1062 l.append(n)
1066 l.append(n)
1063 f = f * 2
1067 f = f * 2
1064 n = p
1068 n = p
1065 i += 1
1069 i += 1
1066
1070
1067 r.append(l)
1071 r.append(l)
1068
1072
1069 return r
1073 return r
1070
1074
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    m = self.changelog.nodemap
    search = []
    fetch = {}
    seen = {}
    seenbranch = {}
    # fix: use identity test rather than 'base == None' — comparing a
    # caller-supplied dict to None with == invokes __eq__ (PEP 8 E711)
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything on the remote is missing,
        # rooted at nullid
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid]
        return []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    if not unknown:
        return []

    req = dict.fromkeys(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug(_("examining %s:%s\n")
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug(_("branch already found\n"))
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug(_("found incomplete branch %s:%s\n")
                              % (short(n[0]), short(n[1])))
                search.append(n) # schedule branch range for scanning
                seenbranch[n] = 1
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        self.ui.debug(_("found new changeset %s\n") %
                                      short(n[1]))
                        fetch[n[1]] = 1 # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                # queue unknown parents for the next round of queries
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req[p] = 1
            seen[n[0]] = 1

        if r:
            reqcnt += 1
            self.ui.debug(_("request %d: %s\n") %
                          (reqcnt, " ".join(map(short, r))))
            # query parents in batches of ten to bound request size
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p+10]):
                    self.ui.debug(_("received %s:%s\n") %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        n = search.pop(0)
        reqcnt += 1
        l = remote.between([(n[0], n[1])])[0]
        l.append(n[1])
        p = n[0]
        f = 1
        for i in l:
            self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
            if i in m:
                if f <= 2:
                    self.ui.debug(_("found new branch changeset %s\n") %
                                  short(p))
                    fetch[p] = 1
                    base[i] = 1
                else:
                    self.ui.debug(_("narrowed branch search to %s:%s\n")
                                  % (short(p), short(i)))
                    search.append((p, i))
                break
            p, f = i, f * 2

    # sanity check our fetch list
    for f in fetch.keys():
        if f in m:
            # NOTE(review): short(f[:4]) abbreviates only the first 4
            # bytes of the node — looks odd; confirm intended
            raise repo.RepoError(_("already have changeset ") + short(f[:4]))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug(_("found new changesets starting at ") +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.debug(_("%d total queries\n") % reqcnt)

    return fetch.keys()
1211
1215
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    # fix: use identity test rather than 'base == None' (PEP 8 E711)
    if base is None:
        base = {}
        # populate base with the common nodes as a side effect
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug(_("common changesets up to ")
                  + " ".join(map(short, base.keys())) + "\n")

    remain = dict.fromkeys(self.changelog.nodemap)

    # prune everything remote has from the tree
    del remain[nullid]
    remove = base.keys()
    while remove:
        n = remove.pop(0)
        if n in remain:
            del remain[n]
            for p in self.changelog.parents(n):
                remove.append(p)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = {}
    for n in remain:
        p1, p2 = self.changelog.parents(n)
        if p1 not in remain and p2 not in remain:
            subset.append(n)
        if heads:
            if p1 in heads:
                updated_heads[p1] = True
            if p2 in heads:
                updated_heads[p2] = True

    # this is the set of all roots we have to push
    if heads:
        return subset, updated_heads.keys()
    else:
        return subset
1259
1263
def pull(self, remote, heads=None, force=False, lock=None):
    """Pull changes from remote into this repository.

    A lock may be supplied by the caller; otherwise one is acquired
    here and released when done.  Returns the result of
    addchangegroup, or 0 when there is nothing to pull.
    """
    own_lock = not lock
    if own_lock:
        lock = self.lock()

    try:
        fetch = self.findincoming(remote, force=force)
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0
        if fetch == [nullid]:
            # local repo is empty: pull everything
            self.ui.status(_("requesting all changes\n"))

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            if 'changegroupsubset' not in remote.capabilities:
                raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        if own_lock:
            lock.release()
1285
1289
def push(self, remote, force=False, revs=None):
    """Push outgoing changesets to remote.

    Two strategies exist:

    - addchangegroup assumes the local user can lock the remote repo
      (local filesystem, old ssh servers).
    - unbundle assumes the local user cannot lock the remote repo
      (new ssh servers, http servers).

    The remote's advertised capabilities decide which one is used.
    """
    if remote.capable('unbundle'):
        pusher = self.push_unbundle
    else:
        pusher = self.push_addchangegroup
    return pusher(remote, force, revs)
1298
1302
def prepush(self, remote, force, revs):
    """Figure out what to push and whether it is safe to do so.

    Returns a two-element tuple: (changegroup, remote_heads) when there
    is something to push, or (None, 1) when there is nothing to push or
    when the push would create new remote heads and force is not set.
    """
    base = {}
    remote_heads = remote.heads()
    # findincoming fills 'base' with the common nodes as a side effect;
    # 'inc' is true if the remote has changes we do not have
    inc = self.findincoming(remote, base, remote_heads, force=force)

    update, updated_heads = self.findoutgoing(remote, base, remote_heads)
    if revs is not None:
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
    else:
        bases, heads = update, self.changelog.heads()

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1
    elif not force:
        # check if we're creating new remote heads
        # to be a remote head after push, node must be either
        # - unknown locally
        # - a local outgoing head descended from update
        # - a remote head that's known locally and not
        #   ancestral to an outgoing head

        warn = 0

        if remote_heads == [nullid]:
            # remote repo is empty: cannot create extra heads there
            warn = 0
        elif not revs and len(heads) > len(remote_heads):
            warn = 1
        else:
            newheads = list(heads)
            for r in remote_heads:
                if r in self.changelog.nodemap:
                    desc = self.changelog.heads(r)
                    l = [h for h in heads if h in desc]
                    if not l:
                        # no outgoing head descends from this remote
                        # head, so it remains a head after the push
                        newheads.append(r)
                else:
                    # remote head unknown locally; assume it stays a head
                    newheads.append(r)
            if len(newheads) > len(remote_heads):
                warn = 1

        if warn:
            self.ui.warn(_("abort: push creates new remote branches!\n"))
            self.ui.status(_("(did you forget to merge?"
                             " use push -f to force)\n"))
            return None, 1
    elif inc:
        # NOTE(review): this warning is only reachable when force is
        # set (the 'not force' branch above either returns or falls
        # through past this elif) — confirm that is intended
        self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        cg = self.changegroup(update, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
1354
1358
def push_addchangegroup(self, remote, force, revs):
    """Push by locking the remote repo and feeding it a changegroup.

    Used for destinations the local user can lock directly (local
    filesystem, old ssh servers).  Returns addchangegroup's result, or
    prepush's error code when there is nothing to push.
    """
    lock = remote.lock()
    try:
        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            return remote.addchangegroup(cg, 'push', self.url())
        return ret[1]
    finally:
        # fix: release the remote lock explicitly even if prepush or
        # addchangegroup raises, instead of relying on the lock object
        # being garbage collected
        lock.release()
1363
1367
def push_unbundle(self, remote, force, revs):
    """Push via the remote's unbundle command.

    The local repo finds the server's heads and computes which revs it
    must send.  Once the revs are transferred, the server aborts if it
    now has different heads (someone else won a commit/push race), so
    no remote lock is needed.
    """
    cg, remote_heads = self.prepush(remote, force, revs)
    if cg is None:
        # nothing to push (or aborted); propagate prepush's error code
        return remote_heads
    if force:
        # the 'force' sentinel replaces the expected head list
        remote_heads = ['force']
    return remote.unbundle(cg, remote_heads, 'push')
1376
1380
def changegroupinfo(self, nodes):
    """Report how many changesets are in the outgoing group; when the
    debug flag is set, also print every changeset id."""
    self.ui.note(_("%d changesets found\n") % len(nodes))
    if not self.ui.debugflag:
        return
    self.ui.debug(_("List of changesets:\n"))
    for n in nodes:
        self.ui.debug("%s\n" % hex(n))
1383
1387
1384 def changegroupsubset(self, bases, heads, source):
1388 def changegroupsubset(self, bases, heads, source):
1385 """This function generates a changegroup consisting of all the nodes
1389 """This function generates a changegroup consisting of all the nodes
1386 that are descendents of any of the bases, and ancestors of any of
1390 that are descendents of any of the bases, and ancestors of any of
1387 the heads.
1391 the heads.
1388
1392
1389 It is fairly complex as determining which filenodes and which
1393 It is fairly complex as determining which filenodes and which
1390 manifest nodes need to be included for the changeset to be complete
1394 manifest nodes need to be included for the changeset to be complete
1391 is non-trivial.
1395 is non-trivial.
1392
1396
1393 Another wrinkle is doing the reverse, figuring out which changeset in
1397 Another wrinkle is doing the reverse, figuring out which changeset in
1394 the changegroup a particular filenode or manifestnode belongs to."""
1398 the changegroup a particular filenode or manifestnode belongs to."""
1395
1399
1396 self.hook('preoutgoing', throw=True, source=source)
1400 self.hook('preoutgoing', throw=True, source=source)
1397
1401
1398 # Set up some initial variables
1402 # Set up some initial variables
1399 # Make it easy to refer to self.changelog
1403 # Make it easy to refer to self.changelog
1400 cl = self.changelog
1404 cl = self.changelog
1401 # msng is short for missing - compute the list of changesets in this
1405 # msng is short for missing - compute the list of changesets in this
1402 # changegroup.
1406 # changegroup.
1403 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1407 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1404 self.changegroupinfo(msng_cl_lst)
1408 self.changegroupinfo(msng_cl_lst)
1405 # Some bases may turn out to be superfluous, and some heads may be
1409 # Some bases may turn out to be superfluous, and some heads may be
1406 # too. nodesbetween will return the minimal set of bases and heads
1410 # too. nodesbetween will return the minimal set of bases and heads
1407 # necessary to re-create the changegroup.
1411 # necessary to re-create the changegroup.
1408
1412
1409 # Known heads are the list of heads that it is assumed the recipient
1413 # Known heads are the list of heads that it is assumed the recipient
1410 # of this changegroup will know about.
1414 # of this changegroup will know about.
1411 knownheads = {}
1415 knownheads = {}
1412 # We assume that all parents of bases are known heads.
1416 # We assume that all parents of bases are known heads.
1413 for n in bases:
1417 for n in bases:
1414 for p in cl.parents(n):
1418 for p in cl.parents(n):
1415 if p != nullid:
1419 if p != nullid:
1416 knownheads[p] = 1
1420 knownheads[p] = 1
1417 knownheads = knownheads.keys()
1421 knownheads = knownheads.keys()
1418 if knownheads:
1422 if knownheads:
1419 # Now that we know what heads are known, we can compute which
1423 # Now that we know what heads are known, we can compute which
1420 # changesets are known. The recipient must know about all
1424 # changesets are known. The recipient must know about all
1421 # changesets required to reach the known heads from the null
1425 # changesets required to reach the known heads from the null
1422 # changeset.
1426 # changeset.
1423 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1427 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1424 junk = None
1428 junk = None
1425 # Transform the list into an ersatz set.
1429 # Transform the list into an ersatz set.
1426 has_cl_set = dict.fromkeys(has_cl_set)
1430 has_cl_set = dict.fromkeys(has_cl_set)
1427 else:
1431 else:
1428 # If there were no known heads, the recipient cannot be assumed to
1432 # If there were no known heads, the recipient cannot be assumed to
1429 # know about any changesets.
1433 # know about any changesets.
1430 has_cl_set = {}
1434 has_cl_set = {}
1431
1435
1432 # Make it easy to refer to self.manifest
1436 # Make it easy to refer to self.manifest
1433 mnfst = self.manifest
1437 mnfst = self.manifest
1434 # We don't know which manifests are missing yet
1438 # We don't know which manifests are missing yet
1435 msng_mnfst_set = {}
1439 msng_mnfst_set = {}
1436 # Nor do we know which filenodes are missing.
1440 # Nor do we know which filenodes are missing.
1437 msng_filenode_set = {}
1441 msng_filenode_set = {}
1438
1442
1439 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1443 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1440 junk = None
1444 junk = None
1441
1445
1442 # A changeset always belongs to itself, so the changenode lookup
1446 # A changeset always belongs to itself, so the changenode lookup
1443 # function for a changenode is identity.
1447 # function for a changenode is identity.
1444 def identity(x):
1448 def identity(x):
1445 return x
1449 return x
1446
1450
1447 # A function generating function. Sets up an environment for the
1451 # A function generating function. Sets up an environment for the
1448 # inner function.
1452 # inner function.
1449 def cmp_by_rev_func(revlog):
1453 def cmp_by_rev_func(revlog):
1450 # Compare two nodes by their revision number in the environment's
1454 # Compare two nodes by their revision number in the environment's
1451 # revision history. Since the revision number both represents the
1455 # revision history. Since the revision number both represents the
1452 # most efficient order to read the nodes in, and represents a
1456 # most efficient order to read the nodes in, and represents a
1453 # topological sorting of the nodes, this function is often useful.
1457 # topological sorting of the nodes, this function is often useful.
1454 def cmp_by_rev(a, b):
1458 def cmp_by_rev(a, b):
1455 return cmp(revlog.rev(a), revlog.rev(b))
1459 return cmp(revlog.rev(a), revlog.rev(b))
1456 return cmp_by_rev
1460 return cmp_by_rev
1457
1461
1458 # If we determine that a particular file or manifest node must be a
1462 # If we determine that a particular file or manifest node must be a
1459 # node that the recipient of the changegroup will already have, we can
1463 # node that the recipient of the changegroup will already have, we can
1460 # also assume the recipient will have all the parents. This function
1464 # also assume the recipient will have all the parents. This function
1461 # prunes them from the set of missing nodes.
1465 # prunes them from the set of missing nodes.
1462 def prune_parents(revlog, hasset, msngset):
1466 def prune_parents(revlog, hasset, msngset):
1463 haslst = hasset.keys()
1467 haslst = hasset.keys()
1464 haslst.sort(cmp_by_rev_func(revlog))
1468 haslst.sort(cmp_by_rev_func(revlog))
1465 for node in haslst:
1469 for node in haslst:
1466 parentlst = [p for p in revlog.parents(node) if p != nullid]
1470 parentlst = [p for p in revlog.parents(node) if p != nullid]
1467 while parentlst:
1471 while parentlst:
1468 n = parentlst.pop()
1472 n = parentlst.pop()
1469 if n not in hasset:
1473 if n not in hasset:
1470 hasset[n] = 1
1474 hasset[n] = 1
1471 p = [p for p in revlog.parents(n) if p != nullid]
1475 p = [p for p in revlog.parents(n) if p != nullid]
1472 parentlst.extend(p)
1476 parentlst.extend(p)
1473 for n in hasset:
1477 for n in hasset:
1474 msngset.pop(n, None)
1478 msngset.pop(n, None)
1475
1479
1476 # This is a function generating function used to set up an environment
1480 # This is a function generating function used to set up an environment
1477 # for the inner function to execute in.
1481 # for the inner function to execute in.
1478 def manifest_and_file_collector(changedfileset):
1482 def manifest_and_file_collector(changedfileset):
1479 # This is an information gathering function that gathers
1483 # This is an information gathering function that gathers
1480 # information from each changeset node that goes out as part of
1484 # information from each changeset node that goes out as part of
1481 # the changegroup. The information gathered is a list of which
1485 # the changegroup. The information gathered is a list of which
1482 # manifest nodes are potentially required (the recipient may
1486 # manifest nodes are potentially required (the recipient may
1483 # already have them) and total list of all files which were
1487 # already have them) and total list of all files which were
1484 # changed in any changeset in the changegroup.
1488 # changed in any changeset in the changegroup.
1485 #
1489 #
1486 # We also remember the first changenode we saw any manifest
1490 # We also remember the first changenode we saw any manifest
1487 # referenced by so we can later determine which changenode 'owns'
1491 # referenced by so we can later determine which changenode 'owns'
1488 # the manifest.
1492 # the manifest.
1489 def collect_manifests_and_files(clnode):
1493 def collect_manifests_and_files(clnode):
1490 c = cl.read(clnode)
1494 c = cl.read(clnode)
1491 for f in c[3]:
1495 for f in c[3]:
1492 # This is to make sure we only have one instance of each
1496 # This is to make sure we only have one instance of each
1493 # filename string for each filename.
1497 # filename string for each filename.
1494 changedfileset.setdefault(f, f)
1498 changedfileset.setdefault(f, f)
1495 msng_mnfst_set.setdefault(c[0], clnode)
1499 msng_mnfst_set.setdefault(c[0], clnode)
1496 return collect_manifests_and_files
1500 return collect_manifests_and_files
1497
1501
1498 # Figure out which manifest nodes (of the ones we think might be part
1502 # Figure out which manifest nodes (of the ones we think might be part
1499 # of the changegroup) the recipient must know about and remove them
1503 # of the changegroup) the recipient must know about and remove them
1500 # from the changegroup.
1504 # from the changegroup.
1501 def prune_manifests():
1505 def prune_manifests():
1502 has_mnfst_set = {}
1506 has_mnfst_set = {}
1503 for n in msng_mnfst_set:
1507 for n in msng_mnfst_set:
1504 # If a 'missing' manifest thinks it belongs to a changenode
1508 # If a 'missing' manifest thinks it belongs to a changenode
1505 # the recipient is assumed to have, obviously the recipient
1509 # the recipient is assumed to have, obviously the recipient
1506 # must have that manifest.
1510 # must have that manifest.
1507 linknode = cl.node(mnfst.linkrev(n))
1511 linknode = cl.node(mnfst.linkrev(n))
1508 if linknode in has_cl_set:
1512 if linknode in has_cl_set:
1509 has_mnfst_set[n] = 1
1513 has_mnfst_set[n] = 1
1510 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1514 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1511
1515
1512 # Use the information collected in collect_manifests_and_files to say
1516 # Use the information collected in collect_manifests_and_files to say
1513 # which changenode any manifestnode belongs to.
1517 # which changenode any manifestnode belongs to.
1514 def lookup_manifest_link(mnfstnode):
1518 def lookup_manifest_link(mnfstnode):
1515 return msng_mnfst_set[mnfstnode]
1519 return msng_mnfst_set[mnfstnode]
1516
1520
1517 # A function generating function that sets up the initial environment
1521 # A function generating function that sets up the initial environment
1518 # the inner function.
1522 # the inner function.
1519 def filenode_collector(changedfiles):
1523 def filenode_collector(changedfiles):
1520 next_rev = [0]
1524 next_rev = [0]
1521 # This gathers information from each manifestnode included in the
1525 # This gathers information from each manifestnode included in the
1522 # changegroup about which filenodes the manifest node references
1526 # changegroup about which filenodes the manifest node references
1523 # so we can include those in the changegroup too.
1527 # so we can include those in the changegroup too.
1524 #
1528 #
1525 # It also remembers which changenode each filenode belongs to. It
1529 # It also remembers which changenode each filenode belongs to. It
1526 # does this by assuming the a filenode belongs to the changenode
1530 # does this by assuming the a filenode belongs to the changenode
1527 # the first manifest that references it belongs to.
1531 # the first manifest that references it belongs to.
1528 def collect_msng_filenodes(mnfstnode):
1532 def collect_msng_filenodes(mnfstnode):
1529 r = mnfst.rev(mnfstnode)
1533 r = mnfst.rev(mnfstnode)
1530 if r == next_rev[0]:
1534 if r == next_rev[0]:
1531 # If the last rev we looked at was the one just previous,
1535 # If the last rev we looked at was the one just previous,
1532 # we only need to see a diff.
1536 # we only need to see a diff.
1533 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1537 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1534 # For each line in the delta
1538 # For each line in the delta
1535 for dline in delta.splitlines():
1539 for dline in delta.splitlines():
1536 # get the filename and filenode for that line
1540 # get the filename and filenode for that line
1537 f, fnode = dline.split('\0')
1541 f, fnode = dline.split('\0')
1538 fnode = bin(fnode[:40])
1542 fnode = bin(fnode[:40])
1539 f = changedfiles.get(f, None)
1543 f = changedfiles.get(f, None)
1540 # And if the file is in the list of files we care
1544 # And if the file is in the list of files we care
1541 # about.
1545 # about.
1542 if f is not None:
1546 if f is not None:
1543 # Get the changenode this manifest belongs to
1547 # Get the changenode this manifest belongs to
1544 clnode = msng_mnfst_set[mnfstnode]
1548 clnode = msng_mnfst_set[mnfstnode]
1545 # Create the set of filenodes for the file if
1549 # Create the set of filenodes for the file if
1546 # there isn't one already.
1550 # there isn't one already.
1547 ndset = msng_filenode_set.setdefault(f, {})
1551 ndset = msng_filenode_set.setdefault(f, {})
1548 # And set the filenode's changelog node to the
1552 # And set the filenode's changelog node to the
1549 # manifest's if it hasn't been set already.
1553 # manifest's if it hasn't been set already.
1550 ndset.setdefault(fnode, clnode)
1554 ndset.setdefault(fnode, clnode)
1551 else:
1555 else:
1552 # Otherwise we need a full manifest.
1556 # Otherwise we need a full manifest.
1553 m = mnfst.read(mnfstnode)
1557 m = mnfst.read(mnfstnode)
1554 # For every file in we care about.
1558 # For every file in we care about.
1555 for f in changedfiles:
1559 for f in changedfiles:
1556 fnode = m.get(f, None)
1560 fnode = m.get(f, None)
1557 # If it's in the manifest
1561 # If it's in the manifest
1558 if fnode is not None:
1562 if fnode is not None:
1559 # See comments above.
1563 # See comments above.
1560 clnode = msng_mnfst_set[mnfstnode]
1564 clnode = msng_mnfst_set[mnfstnode]
1561 ndset = msng_filenode_set.setdefault(f, {})
1565 ndset = msng_filenode_set.setdefault(f, {})
1562 ndset.setdefault(fnode, clnode)
1566 ndset.setdefault(fnode, clnode)
1563 # Remember the revision we hope to see next.
1567 # Remember the revision we hope to see next.
1564 next_rev[0] = r + 1
1568 next_rev[0] = r + 1
1565 return collect_msng_filenodes
1569 return collect_msng_filenodes
1566
1570
1567 # We have a list of filenodes we think we need for a file, lets remove
1571 # We have a list of filenodes we think we need for a file, lets remove
1568 # all those we now the recipient must have.
1572 # all those we now the recipient must have.
1569 def prune_filenodes(f, filerevlog):
1573 def prune_filenodes(f, filerevlog):
1570 msngset = msng_filenode_set[f]
1574 msngset = msng_filenode_set[f]
1571 hasset = {}
1575 hasset = {}
1572 # If a 'missing' filenode thinks it belongs to a changenode we
1576 # If a 'missing' filenode thinks it belongs to a changenode we
1573 # assume the recipient must have, then the recipient must have
1577 # assume the recipient must have, then the recipient must have
1574 # that filenode.
1578 # that filenode.
1575 for n in msngset:
1579 for n in msngset:
1576 clnode = cl.node(filerevlog.linkrev(n))
1580 clnode = cl.node(filerevlog.linkrev(n))
1577 if clnode in has_cl_set:
1581 if clnode in has_cl_set:
1578 hasset[n] = 1
1582 hasset[n] = 1
1579 prune_parents(filerevlog, hasset, msngset)
1583 prune_parents(filerevlog, hasset, msngset)
1580
1584
1581 # A function generator function that sets up the a context for the
1585 # A function generator function that sets up the a context for the
1582 # inner function.
1586 # inner function.
1583 def lookup_filenode_link_func(fname):
1587 def lookup_filenode_link_func(fname):
1584 msngset = msng_filenode_set[fname]
1588 msngset = msng_filenode_set[fname]
1585 # Lookup the changenode the filenode belongs to.
1589 # Lookup the changenode the filenode belongs to.
1586 def lookup_filenode_link(fnode):
1590 def lookup_filenode_link(fnode):
1587 return msngset[fnode]
1591 return msngset[fnode]
1588 return lookup_filenode_link
1592 return lookup_filenode_link
1589
1593
1590 # Now that we have all theses utility functions to help out and
1594 # Now that we have all theses utility functions to help out and
1591 # logically divide up the task, generate the group.
1595 # logically divide up the task, generate the group.
1592 def gengroup():
1596 def gengroup():
1593 # The set of changed files starts empty.
1597 # The set of changed files starts empty.
1594 changedfiles = {}
1598 changedfiles = {}
1595 # Create a changenode group generator that will call our functions
1599 # Create a changenode group generator that will call our functions
1596 # back to lookup the owning changenode and collect information.
1600 # back to lookup the owning changenode and collect information.
1597 group = cl.group(msng_cl_lst, identity,
1601 group = cl.group(msng_cl_lst, identity,
1598 manifest_and_file_collector(changedfiles))
1602 manifest_and_file_collector(changedfiles))
1599 for chnk in group:
1603 for chnk in group:
1600 yield chnk
1604 yield chnk
1601
1605
1602 # The list of manifests has been collected by the generator
1606 # The list of manifests has been collected by the generator
1603 # calling our functions back.
1607 # calling our functions back.
1604 prune_manifests()
1608 prune_manifests()
1605 msng_mnfst_lst = msng_mnfst_set.keys()
1609 msng_mnfst_lst = msng_mnfst_set.keys()
1606 # Sort the manifestnodes by revision number.
1610 # Sort the manifestnodes by revision number.
1607 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1611 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1608 # Create a generator for the manifestnodes that calls our lookup
1612 # Create a generator for the manifestnodes that calls our lookup
1609 # and data collection functions back.
1613 # and data collection functions back.
1610 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1614 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1611 filenode_collector(changedfiles))
1615 filenode_collector(changedfiles))
1612 for chnk in group:
1616 for chnk in group:
1613 yield chnk
1617 yield chnk
1614
1618
1615 # These are no longer needed, dereference and toss the memory for
1619 # These are no longer needed, dereference and toss the memory for
1616 # them.
1620 # them.
1617 msng_mnfst_lst = None
1621 msng_mnfst_lst = None
1618 msng_mnfst_set.clear()
1622 msng_mnfst_set.clear()
1619
1623
1620 changedfiles = changedfiles.keys()
1624 changedfiles = changedfiles.keys()
1621 changedfiles.sort()
1625 changedfiles.sort()
1622 # Go through all our files in order sorted by name.
1626 # Go through all our files in order sorted by name.
1623 for fname in changedfiles:
1627 for fname in changedfiles:
1624 filerevlog = self.file(fname)
1628 filerevlog = self.file(fname)
1625 # Toss out the filenodes that the recipient isn't really
1629 # Toss out the filenodes that the recipient isn't really
1626 # missing.
1630 # missing.
1627 if msng_filenode_set.has_key(fname):
1631 if msng_filenode_set.has_key(fname):
1628 prune_filenodes(fname, filerevlog)
1632 prune_filenodes(fname, filerevlog)
1629 msng_filenode_lst = msng_filenode_set[fname].keys()
1633 msng_filenode_lst = msng_filenode_set[fname].keys()
1630 else:
1634 else:
1631 msng_filenode_lst = []
1635 msng_filenode_lst = []
1632 # If any filenodes are left, generate the group for them,
1636 # If any filenodes are left, generate the group for them,
1633 # otherwise don't bother.
1637 # otherwise don't bother.
1634 if len(msng_filenode_lst) > 0:
1638 if len(msng_filenode_lst) > 0:
1635 yield changegroup.genchunk(fname)
1639 yield changegroup.genchunk(fname)
1636 # Sort the filenodes by their revision #
1640 # Sort the filenodes by their revision #
1637 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1641 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1638 # Create a group generator and only pass in a changenode
1642 # Create a group generator and only pass in a changenode
1639 # lookup function as we need to collect no information
1643 # lookup function as we need to collect no information
1640 # from filenodes.
1644 # from filenodes.
1641 group = filerevlog.group(msng_filenode_lst,
1645 group = filerevlog.group(msng_filenode_lst,
1642 lookup_filenode_link_func(fname))
1646 lookup_filenode_link_func(fname))
1643 for chnk in group:
1647 for chnk in group:
1644 yield chnk
1648 yield chnk
1645 if msng_filenode_set.has_key(fname):
1649 if msng_filenode_set.has_key(fname):
1646 # Don't need this anymore, toss it to free memory.
1650 # Don't need this anymore, toss it to free memory.
1647 del msng_filenode_set[fname]
1651 del msng_filenode_set[fname]
1648 # Signal that no more groups are left.
1652 # Signal that no more groups are left.
1649 yield changegroup.closechunk()
1653 yield changegroup.closechunk()
1650
1654
1651 if msng_cl_lst:
1655 if msng_cl_lst:
1652 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1656 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1653
1657
1654 return util.chunkbuffer(gengroup())
1658 return util.chunkbuffer(gengroup())
1655
1659
1656 def changegroup(self, basenodes, source):
1660 def changegroup(self, basenodes, source):
1657 """Generate a changegroup of all nodes that we have that a recipient
1661 """Generate a changegroup of all nodes that we have that a recipient
1658 doesn't.
1662 doesn't.
1659
1663
1660 This is much easier than the previous function as we can assume that
1664 This is much easier than the previous function as we can assume that
1661 the recipient has any changenode we aren't sending them."""
1665 the recipient has any changenode we aren't sending them."""
1662
1666
1663 self.hook('preoutgoing', throw=True, source=source)
1667 self.hook('preoutgoing', throw=True, source=source)
1664
1668
1665 cl = self.changelog
1669 cl = self.changelog
1666 nodes = cl.nodesbetween(basenodes, None)[0]
1670 nodes = cl.nodesbetween(basenodes, None)[0]
1667 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1671 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1668 self.changegroupinfo(nodes)
1672 self.changegroupinfo(nodes)
1669
1673
1670 def identity(x):
1674 def identity(x):
1671 return x
1675 return x
1672
1676
1673 def gennodelst(revlog):
1677 def gennodelst(revlog):
1674 for r in xrange(0, revlog.count()):
1678 for r in xrange(0, revlog.count()):
1675 n = revlog.node(r)
1679 n = revlog.node(r)
1676 if revlog.linkrev(n) in revset:
1680 if revlog.linkrev(n) in revset:
1677 yield n
1681 yield n
1678
1682
1679 def changed_file_collector(changedfileset):
1683 def changed_file_collector(changedfileset):
1680 def collect_changed_files(clnode):
1684 def collect_changed_files(clnode):
1681 c = cl.read(clnode)
1685 c = cl.read(clnode)
1682 for fname in c[3]:
1686 for fname in c[3]:
1683 changedfileset[fname] = 1
1687 changedfileset[fname] = 1
1684 return collect_changed_files
1688 return collect_changed_files
1685
1689
1686 def lookuprevlink_func(revlog):
1690 def lookuprevlink_func(revlog):
1687 def lookuprevlink(n):
1691 def lookuprevlink(n):
1688 return cl.node(revlog.linkrev(n))
1692 return cl.node(revlog.linkrev(n))
1689 return lookuprevlink
1693 return lookuprevlink
1690
1694
1691 def gengroup():
1695 def gengroup():
1692 # construct a list of all changed files
1696 # construct a list of all changed files
1693 changedfiles = {}
1697 changedfiles = {}
1694
1698
1695 for chnk in cl.group(nodes, identity,
1699 for chnk in cl.group(nodes, identity,
1696 changed_file_collector(changedfiles)):
1700 changed_file_collector(changedfiles)):
1697 yield chnk
1701 yield chnk
1698 changedfiles = changedfiles.keys()
1702 changedfiles = changedfiles.keys()
1699 changedfiles.sort()
1703 changedfiles.sort()
1700
1704
1701 mnfst = self.manifest
1705 mnfst = self.manifest
1702 nodeiter = gennodelst(mnfst)
1706 nodeiter = gennodelst(mnfst)
1703 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1707 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1704 yield chnk
1708 yield chnk
1705
1709
1706 for fname in changedfiles:
1710 for fname in changedfiles:
1707 filerevlog = self.file(fname)
1711 filerevlog = self.file(fname)
1708 nodeiter = gennodelst(filerevlog)
1712 nodeiter = gennodelst(filerevlog)
1709 nodeiter = list(nodeiter)
1713 nodeiter = list(nodeiter)
1710 if nodeiter:
1714 if nodeiter:
1711 yield changegroup.genchunk(fname)
1715 yield changegroup.genchunk(fname)
1712 lookup = lookuprevlink_func(filerevlog)
1716 lookup = lookuprevlink_func(filerevlog)
1713 for chnk in filerevlog.group(nodeiter, lookup):
1717 for chnk in filerevlog.group(nodeiter, lookup):
1714 yield chnk
1718 yield chnk
1715
1719
1716 yield changegroup.closechunk()
1720 yield changegroup.closechunk()
1717
1721
1718 if nodes:
1722 if nodes:
1719 self.hook('outgoing', node=hex(nodes[0]), source=source)
1723 self.hook('outgoing', node=hex(nodes[0]), source=source)
1720
1724
1721 return util.chunkbuffer(gengroup())
1725 return util.chunkbuffer(gengroup())
1722
1726
1723 def addchangegroup(self, source, srctype, url):
1727 def addchangegroup(self, source, srctype, url):
1724 """add changegroup to repo.
1728 """add changegroup to repo.
1725 returns number of heads modified or added + 1."""
1729 returns number of heads modified or added + 1."""
1726
1730
1727 def csmap(x):
1731 def csmap(x):
1728 self.ui.debug(_("add changeset %s\n") % short(x))
1732 self.ui.debug(_("add changeset %s\n") % short(x))
1729 return cl.count()
1733 return cl.count()
1730
1734
1731 def revmap(x):
1735 def revmap(x):
1732 return cl.rev(x)
1736 return cl.rev(x)
1733
1737
1734 if not source:
1738 if not source:
1735 return 0
1739 return 0
1736
1740
1737 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1741 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1738
1742
1739 changesets = files = revisions = 0
1743 changesets = files = revisions = 0
1740
1744
1741 tr = self.transaction()
1745 tr = self.transaction()
1742
1746
1743 # write changelog data to temp files so concurrent readers will not see
1747 # write changelog data to temp files so concurrent readers will not see
1744 # inconsistent view
1748 # inconsistent view
1745 cl = None
1749 cl = None
1746 try:
1750 try:
1747 cl = appendfile.appendchangelog(self.sopener,
1751 cl = appendfile.appendchangelog(self.sopener,
1748 self.changelog.version)
1752 self.changelog.version)
1749
1753
1750 oldheads = len(cl.heads())
1754 oldheads = len(cl.heads())
1751
1755
1752 # pull off the changeset group
1756 # pull off the changeset group
1753 self.ui.status(_("adding changesets\n"))
1757 self.ui.status(_("adding changesets\n"))
1754 cor = cl.count() - 1
1758 cor = cl.count() - 1
1755 chunkiter = changegroup.chunkiter(source)
1759 chunkiter = changegroup.chunkiter(source)
1756 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1760 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1757 raise util.Abort(_("received changelog group is empty"))
1761 raise util.Abort(_("received changelog group is empty"))
1758 cnr = cl.count() - 1
1762 cnr = cl.count() - 1
1759 changesets = cnr - cor
1763 changesets = cnr - cor
1760
1764
1761 # pull off the manifest group
1765 # pull off the manifest group
1762 self.ui.status(_("adding manifests\n"))
1766 self.ui.status(_("adding manifests\n"))
1763 chunkiter = changegroup.chunkiter(source)
1767 chunkiter = changegroup.chunkiter(source)
1764 # no need to check for empty manifest group here:
1768 # no need to check for empty manifest group here:
1765 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1769 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1766 # no new manifest will be created and the manifest group will
1770 # no new manifest will be created and the manifest group will
1767 # be empty during the pull
1771 # be empty during the pull
1768 self.manifest.addgroup(chunkiter, revmap, tr)
1772 self.manifest.addgroup(chunkiter, revmap, tr)
1769
1773
1770 # process the files
1774 # process the files
1771 self.ui.status(_("adding file changes\n"))
1775 self.ui.status(_("adding file changes\n"))
1772 while 1:
1776 while 1:
1773 f = changegroup.getchunk(source)
1777 f = changegroup.getchunk(source)
1774 if not f:
1778 if not f:
1775 break
1779 break
1776 self.ui.debug(_("adding %s revisions\n") % f)
1780 self.ui.debug(_("adding %s revisions\n") % f)
1777 fl = self.file(f)
1781 fl = self.file(f)
1778 o = fl.count()
1782 o = fl.count()
1779 chunkiter = changegroup.chunkiter(source)
1783 chunkiter = changegroup.chunkiter(source)
1780 if fl.addgroup(chunkiter, revmap, tr) is None:
1784 if fl.addgroup(chunkiter, revmap, tr) is None:
1781 raise util.Abort(_("received file revlog group is empty"))
1785 raise util.Abort(_("received file revlog group is empty"))
1782 revisions += fl.count() - o
1786 revisions += fl.count() - o
1783 files += 1
1787 files += 1
1784
1788
1785 cl.writedata()
1789 cl.writedata()
1786 finally:
1790 finally:
1787 if cl:
1791 if cl:
1788 cl.cleanup()
1792 cl.cleanup()
1789
1793
1790 # make changelog see real files again
1794 # make changelog see real files again
1791 self.changelog = changelog.changelog(self.sopener,
1795 self.changelog = changelog.changelog(self.sopener,
1792 self.changelog.version)
1796 self.changelog.version)
1793 self.changelog.checkinlinesize(tr)
1797 self.changelog.checkinlinesize(tr)
1794
1798
1795 newheads = len(self.changelog.heads())
1799 newheads = len(self.changelog.heads())
1796 heads = ""
1800 heads = ""
1797 if oldheads and newheads != oldheads:
1801 if oldheads and newheads != oldheads:
1798 heads = _(" (%+d heads)") % (newheads - oldheads)
1802 heads = _(" (%+d heads)") % (newheads - oldheads)
1799
1803
1800 self.ui.status(_("added %d changesets"
1804 self.ui.status(_("added %d changesets"
1801 " with %d changes to %d files%s\n")
1805 " with %d changes to %d files%s\n")
1802 % (changesets, revisions, files, heads))
1806 % (changesets, revisions, files, heads))
1803
1807
1804 if changesets > 0:
1808 if changesets > 0:
1805 self.hook('pretxnchangegroup', throw=True,
1809 self.hook('pretxnchangegroup', throw=True,
1806 node=hex(self.changelog.node(cor+1)), source=srctype,
1810 node=hex(self.changelog.node(cor+1)), source=srctype,
1807 url=url)
1811 url=url)
1808
1812
1809 tr.close()
1813 tr.close()
1810
1814
1811 if changesets > 0:
1815 if changesets > 0:
1812 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1816 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1813 source=srctype, url=url)
1817 source=srctype, url=url)
1814
1818
1815 for i in xrange(cor + 1, cnr + 1):
1819 for i in xrange(cor + 1, cnr + 1):
1816 self.hook("incoming", node=hex(self.changelog.node(i)),
1820 self.hook("incoming", node=hex(self.changelog.node(i)),
1817 source=srctype, url=url)
1821 source=srctype, url=url)
1818
1822
1819 return newheads - oldheads + 1
1823 return newheads - oldheads + 1
1820
1824
1821
1825
1822 def stream_in(self, remote):
1826 def stream_in(self, remote):
1823 fp = remote.stream_out()
1827 fp = remote.stream_out()
1824 l = fp.readline()
1828 l = fp.readline()
1825 try:
1829 try:
1826 resp = int(l)
1830 resp = int(l)
1827 except ValueError:
1831 except ValueError:
1828 raise util.UnexpectedOutput(
1832 raise util.UnexpectedOutput(
1829 _('Unexpected response from remote server:'), l)
1833 _('Unexpected response from remote server:'), l)
1830 if resp == 1:
1834 if resp == 1:
1831 raise util.Abort(_('operation forbidden by server'))
1835 raise util.Abort(_('operation forbidden by server'))
1832 elif resp == 2:
1836 elif resp == 2:
1833 raise util.Abort(_('locking the remote repository failed'))
1837 raise util.Abort(_('locking the remote repository failed'))
1834 elif resp != 0:
1838 elif resp != 0:
1835 raise util.Abort(_('the server sent an unknown error code'))
1839 raise util.Abort(_('the server sent an unknown error code'))
1836 self.ui.status(_('streaming all changes\n'))
1840 self.ui.status(_('streaming all changes\n'))
1837 l = fp.readline()
1841 l = fp.readline()
1838 try:
1842 try:
1839 total_files, total_bytes = map(int, l.split(' ', 1))
1843 total_files, total_bytes = map(int, l.split(' ', 1))
1840 except ValueError, TypeError:
1844 except ValueError, TypeError:
1841 raise util.UnexpectedOutput(
1845 raise util.UnexpectedOutput(
1842 _('Unexpected response from remote server:'), l)
1846 _('Unexpected response from remote server:'), l)
1843 self.ui.status(_('%d files to transfer, %s of data\n') %
1847 self.ui.status(_('%d files to transfer, %s of data\n') %
1844 (total_files, util.bytecount(total_bytes)))
1848 (total_files, util.bytecount(total_bytes)))
1845 start = time.time()
1849 start = time.time()
1846 for i in xrange(total_files):
1850 for i in xrange(total_files):
1847 # XXX doesn't support '\n' or '\r' in filenames
1851 # XXX doesn't support '\n' or '\r' in filenames
1848 l = fp.readline()
1852 l = fp.readline()
1849 try:
1853 try:
1850 name, size = l.split('\0', 1)
1854 name, size = l.split('\0', 1)
1851 size = int(size)
1855 size = int(size)
1852 except ValueError, TypeError:
1856 except ValueError, TypeError:
1853 raise util.UnexpectedOutput(
1857 raise util.UnexpectedOutput(
1854 _('Unexpected response from remote server:'), l)
1858 _('Unexpected response from remote server:'), l)
1855 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1859 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1856 ofp = self.sopener(name, 'w')
1860 ofp = self.sopener(name, 'w')
1857 for chunk in util.filechunkiter(fp, limit=size):
1861 for chunk in util.filechunkiter(fp, limit=size):
1858 ofp.write(chunk)
1862 ofp.write(chunk)
1859 ofp.close()
1863 ofp.close()
1860 elapsed = time.time() - start
1864 elapsed = time.time() - start
1861 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1865 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1862 (util.bytecount(total_bytes), elapsed,
1866 (util.bytecount(total_bytes), elapsed,
1863 util.bytecount(total_bytes / elapsed)))
1867 util.bytecount(total_bytes / elapsed)))
1864 self.reload()
1868 self.reload()
1865 return len(self.heads()) + 1
1869 return len(self.heads()) + 1
1866
1870
1867 def clone(self, remote, heads=[], stream=False):
1871 def clone(self, remote, heads=[], stream=False):
1868 '''clone remote repository.
1872 '''clone remote repository.
1869
1873
1870 keyword arguments:
1874 keyword arguments:
1871 heads: list of revs to clone (forces use of pull)
1875 heads: list of revs to clone (forces use of pull)
1872 stream: use streaming clone if possible'''
1876 stream: use streaming clone if possible'''
1873
1877
1874 # now, all clients that can request uncompressed clones can
1878 # now, all clients that can request uncompressed clones can
1875 # read repo formats supported by all servers that can serve
1879 # read repo formats supported by all servers that can serve
1876 # them.
1880 # them.
1877
1881
1878 # if revlog format changes, client will have to check version
1882 # if revlog format changes, client will have to check version
1879 # and format flags on "stream" capability, and use
1883 # and format flags on "stream" capability, and use
1880 # uncompressed only if compatible.
1884 # uncompressed only if compatible.
1881
1885
1882 if stream and not heads and remote.capable('stream'):
1886 if stream and not heads and remote.capable('stream'):
1883 return self.stream_in(remote)
1887 return self.stream_in(remote)
1884 return self.pull(remote, heads)
1888 return self.pull(remote, heads)
1885
1889
1886 # used to avoid circular references so destructors work
1890 # used to avoid circular references so destructors work
1887 def aftertrans(base):
1891 def aftertrans(base):
1888 p = base
1892 p = base
1889 def a():
1893 def a():
1890 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1894 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1891 util.rename(os.path.join(p, "journal.dirstate"),
1895 util.rename(os.path.join(p, "journal.dirstate"),
1892 os.path.join(p, "undo.dirstate"))
1896 os.path.join(p, "undo.dirstate"))
1893 return a
1897 return a
1894
1898
1895 def instance(ui, path, create):
1899 def instance(ui, path, create):
1896 return localrepository(ui, util.drop_scheme('file', path), create)
1900 return localrepository(ui, util.drop_scheme('file', path), create)
1897
1901
1898 def islocal(path):
1902 def islocal(path):
1899 return True
1903 return True
@@ -1,33 +1,40
1 #!/bin/sh
1 #!/bin/sh
2
2
3 hg init t
3 hg init t
4 cd t
4 cd t
5 hg branches
5 hg branches
6
6
7 echo foo > a
7 echo foo > a
8 hg add a
8 hg add a
9 hg ci -m "initial" -d "1000000 0"
9 hg ci -m "initial" -d "1000000 0"
10 hg branch foo
10 hg branch foo
11 hg branch
11 hg branch
12 hg ci -m "add branch name" -d "1000000 0"
12 hg ci -m "add branch name" -d "1000000 0"
13 hg branch bar
13 hg branch bar
14 hg ci -m "change branch name" -d "1000000 0"
14 hg ci -m "change branch name" -d "1000000 0"
15 hg branch ""
15 hg branch ""
16 hg ci -m "clear branch name" -d "1000000 0"
16 hg ci -m "clear branch name" -d "1000000 0"
17
17
18 hg co foo
18 hg co foo
19 hg branch
19 hg branch
20 echo bleah > a
20 echo bleah > a
21 hg ci -m "modify a branch" -d "1000000 0"
21 hg ci -m "modify a branch" -d "1000000 0"
22
22
23 hg merge
23 hg merge
24 hg branch
24 hg branch
25 hg ci -m "merge" -d "1000000 0"
25 hg ci -m "merge" -d "1000000 0"
26 hg log
26 hg log
27
27
28 hg branches
28 hg branches
29 hg branches -q
29 hg branches -q
30
30
31 echo % test for invalid branch cache
31 echo % test for invalid branch cache
32 hg rollback
32 hg rollback
33 cp .hg/branches.cache .hg/bc-invalid
33 hg log -r foo
34 hg log -r foo
35 cp .hg/bc-invalid .hg/branches.cache
36 hg --debug log -r foo
37 rm .hg/branches.cache
38 echo corrupted > .hg/branches.cache
39 hg log -qr foo
40 cat .hg/branches.cache
@@ -1,58 +1,77
1 foo
1 foo
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 foo
3 foo
4 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
4 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
5 (branch merge, don't forget to commit)
5 (branch merge, don't forget to commit)
6 foo
6 foo
7 changeset: 5:5f8fb06e083e
7 changeset: 5:5f8fb06e083e
8 branch: foo
8 branch: foo
9 tag: tip
9 tag: tip
10 parent: 4:4909a3732169
10 parent: 4:4909a3732169
11 parent: 3:bf1bc2f45e83
11 parent: 3:bf1bc2f45e83
12 user: test
12 user: test
13 date: Mon Jan 12 13:46:40 1970 +0000
13 date: Mon Jan 12 13:46:40 1970 +0000
14 summary: merge
14 summary: merge
15
15
16 changeset: 4:4909a3732169
16 changeset: 4:4909a3732169
17 branch: foo
17 branch: foo
18 parent: 1:b699b1cec9c2
18 parent: 1:b699b1cec9c2
19 user: test
19 user: test
20 date: Mon Jan 12 13:46:40 1970 +0000
20 date: Mon Jan 12 13:46:40 1970 +0000
21 summary: modify a branch
21 summary: modify a branch
22
22
23 changeset: 3:bf1bc2f45e83
23 changeset: 3:bf1bc2f45e83
24 user: test
24 user: test
25 date: Mon Jan 12 13:46:40 1970 +0000
25 date: Mon Jan 12 13:46:40 1970 +0000
26 summary: clear branch name
26 summary: clear branch name
27
27
28 changeset: 2:67ec16bde7f1
28 changeset: 2:67ec16bde7f1
29 branch: bar
29 branch: bar
30 user: test
30 user: test
31 date: Mon Jan 12 13:46:40 1970 +0000
31 date: Mon Jan 12 13:46:40 1970 +0000
32 summary: change branch name
32 summary: change branch name
33
33
34 changeset: 1:b699b1cec9c2
34 changeset: 1:b699b1cec9c2
35 branch: foo
35 branch: foo
36 user: test
36 user: test
37 date: Mon Jan 12 13:46:40 1970 +0000
37 date: Mon Jan 12 13:46:40 1970 +0000
38 summary: add branch name
38 summary: add branch name
39
39
40 changeset: 0:be8523e69bf8
40 changeset: 0:be8523e69bf8
41 user: test
41 user: test
42 date: Mon Jan 12 13:46:40 1970 +0000
42 date: Mon Jan 12 13:46:40 1970 +0000
43 summary: initial
43 summary: initial
44
44
45 foo 5:5f8fb06e083e
45 foo 5:5f8fb06e083e
46 bar 2:67ec16bde7f1
46 bar 2:67ec16bde7f1
47 foo
47 foo
48 bar
48 bar
49 % test for invalid branch cache
49 % test for invalid branch cache
50 rolling back last transaction
50 rolling back last transaction
51 changeset: 4:4909a3732169
51 changeset: 4:4909a3732169
52 branch: foo
52 branch: foo
53 tag: tip
53 tag: tip
54 parent: 1:b699b1cec9c2
54 parent: 1:b699b1cec9c2
55 user: test
55 user: test
56 date: Mon Jan 12 13:46:40 1970 +0000
56 date: Mon Jan 12 13:46:40 1970 +0000
57 summary: modify a branch
57 summary: modify a branch
58
58
59 Invalid branch cache: unknown tip
60 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
61 branch: foo
62 tag: tip
63 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
64 parent: -1:0000000000000000000000000000000000000000
65 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
66 user: test
67 date: Mon Jan 12 13:46:40 1970 +0000
68 files: a
69 extra: branch=foo
70 description:
71 modify a branch
72
73
74 4:4909a3732169
75 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
76 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
77 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
General Comments 0
You need to be logged in to leave comments. Login now