##// END OF EJS Templates
Don't report an error when closing heads during local push (issue387)
Thomas Arendsen Hein -
r3803:2aef481a default
parent child Browse files
Show More
@@ -1,1918 +1,1927
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
class localrepository(repo.repository):
    # capabilities advertised to peers during the wire-protocol handshake
    capabilities = ('lookup', 'changegroupsubset')
19
19
    def __del__(self):
        # drop the transaction handle on destruction so a pending
        # transaction object is not kept alive by the repository
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or create) the repository at path.

        parentui: ui object to inherit configuration from.
        path: repository root; when None, search upward from the
              current directory for one containing ".hg".
        create: when true, initialize a new repository at path.

        Raises repo.RepoError if no repository is found, or if create
        is requested for a path that already holds one.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up from the current directory until ".hg" is found
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    # reached the filesystem root without finding a repo
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p
        self.path = os.path.join(path, ".hg")
        # store path; currently identical to the .hg directory itself
        self.spath = self.path

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                if self.spath != self.path:
                    os.mkdir(self.spath)
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)

        self.root = os.path.realpath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # openers rooted at .hg, the store, and the working directory
        self.opener = util.opener(self.path)
        self.sopener = util.opener(self.spath)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is perfectly valid
            pass

        # determine the revlog format version and flags from the config
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily populated caches; invalidated by reload()
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
92
92
93 def url(self):
93 def url(self):
94 return 'file:' + self.root
94 return 'file:' + self.root
95
95
    def hook(self, name, throw=False, **args):
        """Run every configured hook matching name.

        Hooks come from the [hooks] config section; entries whose key
        (before any '.') equals name are run in sorted order.  Entries
        starting with "python:" are called in-process, anything else is
        run as a shell command.  Returns the combined failure status;
        with throw=True the first failure raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the hook callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run an external hook command; the hook arguments are
            # exported to it as HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
176
176
    # characters that may not appear in a tag name (field separator and
    # line breaks of the tags file format)
    tag_disallowed = ':\r\n'
178
178
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # reject names that would corrupt the tags file format
        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to auto-commit over pending .hgtags changes; status()[:5]
        # appears to be modified/added/removed/deleted/unknown -- confirm
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            # .hgtags is not yet tracked; schedule it for addition
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
222
222
223 def tags(self):
223 def tags(self):
224 '''return a mapping of tag to node'''
224 '''return a mapping of tag to node'''
225 if not self.tagscache:
225 if not self.tagscache:
226 self.tagscache = {}
226 self.tagscache = {}
227
227
228 def parsetag(line, context):
228 def parsetag(line, context):
229 if not line:
229 if not line:
230 return
230 return
231 s = l.split(" ", 1)
231 s = l.split(" ", 1)
232 if len(s) != 2:
232 if len(s) != 2:
233 self.ui.warn(_("%s: cannot parse entry\n") % context)
233 self.ui.warn(_("%s: cannot parse entry\n") % context)
234 return
234 return
235 node, key = s
235 node, key = s
236 key = util.tolocal(key.strip()) # stored in UTF-8
236 key = util.tolocal(key.strip()) # stored in UTF-8
237 try:
237 try:
238 bin_n = bin(node)
238 bin_n = bin(node)
239 except TypeError:
239 except TypeError:
240 self.ui.warn(_("%s: node '%s' is not well formed\n") %
240 self.ui.warn(_("%s: node '%s' is not well formed\n") %
241 (context, node))
241 (context, node))
242 return
242 return
243 if bin_n not in self.changelog.nodemap:
243 if bin_n not in self.changelog.nodemap:
244 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
244 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
245 (context, key))
245 (context, key))
246 return
246 return
247 self.tagscache[key] = bin_n
247 self.tagscache[key] = bin_n
248
248
249 # read the tags file from each head, ending with the tip,
249 # read the tags file from each head, ending with the tip,
250 # and add each tag found to the map, with "newer" ones
250 # and add each tag found to the map, with "newer" ones
251 # taking precedence
251 # taking precedence
252 f = None
252 f = None
253 for rev, node, fnode in self._hgtagsnodes():
253 for rev, node, fnode in self._hgtagsnodes():
254 f = (f and f.filectx(fnode) or
254 f = (f and f.filectx(fnode) or
255 self.filectx('.hgtags', fileid=fnode))
255 self.filectx('.hgtags', fileid=fnode))
256 count = 0
256 count = 0
257 for l in f.data().splitlines():
257 for l in f.data().splitlines():
258 count += 1
258 count += 1
259 parsetag(l, _("%s, line %d") % (str(f), count))
259 parsetag(l, _("%s, line %d") % (str(f), count))
260
260
261 try:
261 try:
262 f = self.opener("localtags")
262 f = self.opener("localtags")
263 count = 0
263 count = 0
264 for l in f:
264 for l in f:
265 # localtags are stored in the local character set
265 # localtags are stored in the local character set
266 # while the internal tag table is stored in UTF-8
266 # while the internal tag table is stored in UTF-8
267 l = util.fromlocal(l)
267 l = util.fromlocal(l)
268 count += 1
268 count += 1
269 parsetag(l, _("localtags, line %d") % count)
269 parsetag(l, _("localtags, line %d") % count)
270 except IOError:
270 except IOError:
271 pass
271 pass
272
272
273 self.tagscache['tip'] = self.changelog.tip()
273 self.tagscache['tip'] = self.changelog.tip()
274
274
275 return self.tagscache
275 return self.tagscache
276
276
277 def _hgtagsnodes(self):
277 def _hgtagsnodes(self):
278 heads = self.heads()
278 heads = self.heads()
279 heads.reverse()
279 heads.reverse()
280 last = {}
280 last = {}
281 ret = []
281 ret = []
282 for node in heads:
282 for node in heads:
283 c = self.changectx(node)
283 c = self.changectx(node)
284 rev = c.rev()
284 rev = c.rev()
285 try:
285 try:
286 fnode = c.filenode('.hgtags')
286 fnode = c.filenode('.hgtags')
287 except repo.LookupError:
287 except repo.LookupError:
288 continue
288 continue
289 ret.append((rev, node, fnode))
289 ret.append((rev, node, fnode))
290 if fnode in last:
290 if fnode in last:
291 ret[last[fnode]] = None
291 ret[last[fnode]] = None
292 last[fnode] = len(ret) - 1
292 last[fnode] = len(ret) - 1
293 return [item for item in ret if item]
293 return [item for item in ret if item]
294
294
295 def tagslist(self):
295 def tagslist(self):
296 '''return a list of tags ordered by revision'''
296 '''return a list of tags ordered by revision'''
297 l = []
297 l = []
298 for t, n in self.tags().items():
298 for t, n in self.tags().items():
299 try:
299 try:
300 r = self.changelog.rev(n)
300 r = self.changelog.rev(n)
301 except:
301 except:
302 r = -2 # sort to the beginning of the list if unknown
302 r = -2 # sort to the beginning of the list if unknown
303 l.append((r, t, n))
303 l.append((r, t, n))
304 l.sort()
304 l.sort()
305 return [(t, n) for r, t, n in l]
305 return [(t, n) for r, t, n in l]
306
306
307 def nodetags(self, node):
307 def nodetags(self, node):
308 '''return the tags associated with a node'''
308 '''return the tags associated with a node'''
309 if not self.nodetagscache:
309 if not self.nodetagscache:
310 self.nodetagscache = {}
310 self.nodetagscache = {}
311 for t, n in self.tags().items():
311 for t, n in self.tags().items():
312 self.nodetagscache.setdefault(n, []).append(t)
312 self.nodetagscache.setdefault(n, []).append(t)
313 return self.nodetagscache.get(node, [])
313 return self.nodetagscache.get(node, [])
314
314
315 def branchtags(self):
315 def branchtags(self):
316 if self.branchcache != None:
316 if self.branchcache != None:
317 return self.branchcache
317 return self.branchcache
318
318
319 self.branchcache = {} # avoid recursion in changectx
319 self.branchcache = {} # avoid recursion in changectx
320
320
321 partial, last, lrev = self._readbranchcache()
321 partial, last, lrev = self._readbranchcache()
322
322
323 tiprev = self.changelog.count() - 1
323 tiprev = self.changelog.count() - 1
324 if lrev != tiprev:
324 if lrev != tiprev:
325 self._updatebranchcache(partial, lrev+1, tiprev+1)
325 self._updatebranchcache(partial, lrev+1, tiprev+1)
326 self._writebranchcache(partial, self.changelog.tip(), tiprev)
326 self._writebranchcache(partial, self.changelog.tip(), tiprev)
327
327
328 # the branch cache is stored on disk as UTF-8, but in the local
328 # the branch cache is stored on disk as UTF-8, but in the local
329 # charset internally
329 # charset internally
330 for k, v in partial.items():
330 for k, v in partial.items():
331 self.branchcache[util.tolocal(k)] = v
331 self.branchcache[util.tolocal(k)] = v
332 return self.branchcache
332 return self.branchcache
333
333
    def _readbranchcache(self):
        # Return (partial, last, lrev): the branch -> node map read from
        # branches.cache plus the tip node/rev the cache was written
        # against.  Any problem reading or validating the cache makes it
        # be treated as empty.
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line records the tip the cache was valid for
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # corrupt or stale cache: fall back to an empty one
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
357
357
358 def _writebranchcache(self, branches, tip, tiprev):
358 def _writebranchcache(self, branches, tip, tiprev):
359 try:
359 try:
360 f = self.opener("branches.cache", "w")
360 f = self.opener("branches.cache", "w")
361 f.write("%s %s\n" % (hex(tip), tiprev))
361 f.write("%s %s\n" % (hex(tip), tiprev))
362 for label, node in branches.iteritems():
362 for label, node in branches.iteritems():
363 f.write("%s %s\n" % (hex(node), label))
363 f.write("%s %s\n" % (hex(node), label))
364 except IOError:
364 except IOError:
365 pass
365 pass
366
366
367 def _updatebranchcache(self, partial, start, end):
367 def _updatebranchcache(self, partial, start, end):
368 for r in xrange(start, end):
368 for r in xrange(start, end):
369 c = self.changectx(r)
369 c = self.changectx(r)
370 b = c.branch()
370 b = c.branch()
371 if b:
371 if b:
372 partial[b] = c.node()
372 partial[b] = c.node()
373
373
374 def lookup(self, key):
374 def lookup(self, key):
375 if key == '.':
375 if key == '.':
376 key = self.dirstate.parents()[0]
376 key = self.dirstate.parents()[0]
377 if key == nullid:
377 if key == nullid:
378 raise repo.RepoError(_("no revision checked out"))
378 raise repo.RepoError(_("no revision checked out"))
379 elif key == 'null':
379 elif key == 'null':
380 return nullid
380 return nullid
381 n = self.changelog._match(key)
381 n = self.changelog._match(key)
382 if n:
382 if n:
383 return n
383 return n
384 if key in self.tags():
384 if key in self.tags():
385 return self.tags()[key]
385 return self.tags()[key]
386 if key in self.branchtags():
386 if key in self.branchtags():
387 return self.branchtags()[key]
387 return self.branchtags()[key]
388 n = self.changelog._partialmatch(key)
388 n = self.changelog._partialmatch(key)
389 if n:
389 if n:
390 return n
390 return n
391 raise repo.RepoError(_("unknown revision '%s'") % key)
391 raise repo.RepoError(_("unknown revision '%s'") % key)
392
392
393 def dev(self):
393 def dev(self):
394 return os.lstat(self.path).st_dev
394 return os.lstat(self.path).st_dev
395
395
    def local(self):
        # this is a local, on-disk repository (as opposed to a remote
        # repository class)
        return True
398
398
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
401
401
    def sjoin(self, f):
        # path of f inside the store directory
        return os.path.join(self.spath, f)
404
404
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
407
407
408 def file(self, f):
408 def file(self, f):
409 if f[0] == '/':
409 if f[0] == '/':
410 f = f[1:]
410 f = f[1:]
411 return filelog.filelog(self.sopener, f, self.revlogversion)
411 return filelog.filelog(self.sopener, f, self.revlogversion)
412
412
    def changectx(self, changeid=None):
        # return a change context for changeid (see context.changectx
        # for accepted identifiers and the default)
        return context.changectx(self, changeid)
415
415
    def workingctx(self):
        # return a context object for the working directory
        return context.workingctx(self)
418
418
419 def parents(self, changeid=None):
419 def parents(self, changeid=None):
420 '''
420 '''
421 get list of changectxs for parents of changeid or working directory
421 get list of changectxs for parents of changeid or working directory
422 '''
422 '''
423 if changeid is None:
423 if changeid is None:
424 pl = self.dirstate.parents()
424 pl = self.dirstate.parents()
425 else:
425 else:
426 n = self.changelog.lookup(changeid)
426 n = self.changelog.lookup(changeid)
427 pl = self.changelog.parents(n)
427 pl = self.changelog.parents(n)
428 if pl[1] == nullid:
428 if pl[1] == nullid:
429 return [self.changectx(pl[0])]
429 return [self.changectx(pl[0])]
430 return [self.changectx(pl[0]), self.changectx(pl[1])]
430 return [self.changectx(pl[0]), self.changectx(pl[1])]
431
431
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        # thin wrapper; all resolution happens in context.filectx
        return context.filectx(self, path, changeid, fileid)
436
436
    def getcwd(self):
        # current working directory as tracked by the dirstate
        return self.dirstate.getcwd()
439
439
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
442
442
443 def wread(self, filename):
443 def wread(self, filename):
444 if self.encodepats == None:
444 if self.encodepats == None:
445 l = []
445 l = []
446 for pat, cmd in self.ui.configitems("encode"):
446 for pat, cmd in self.ui.configitems("encode"):
447 mf = util.matcher(self.root, "", [pat], [], [])[1]
447 mf = util.matcher(self.root, "", [pat], [], [])[1]
448 l.append((mf, cmd))
448 l.append((mf, cmd))
449 self.encodepats = l
449 self.encodepats = l
450
450
451 data = self.wopener(filename, 'r').read()
451 data = self.wopener(filename, 'r').read()
452
452
453 for mf, cmd in self.encodepats:
453 for mf, cmd in self.encodepats:
454 if mf(filename):
454 if mf(filename):
455 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
455 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
456 data = util.filter(data, cmd)
456 data = util.filter(data, cmd)
457 break
457 break
458
458
459 return data
459 return data
460
460
461 def wwrite(self, filename, data, fd=None):
461 def wwrite(self, filename, data, fd=None):
462 if self.decodepats == None:
462 if self.decodepats == None:
463 l = []
463 l = []
464 for pat, cmd in self.ui.configitems("decode"):
464 for pat, cmd in self.ui.configitems("decode"):
465 mf = util.matcher(self.root, "", [pat], [], [])[1]
465 mf = util.matcher(self.root, "", [pat], [], [])[1]
466 l.append((mf, cmd))
466 l.append((mf, cmd))
467 self.decodepats = l
467 self.decodepats = l
468
468
469 for mf, cmd in self.decodepats:
469 for mf, cmd in self.decodepats:
470 if mf(filename):
470 if mf(filename):
471 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
471 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
472 data = util.filter(data, cmd)
472 data = util.filter(data, cmd)
473 break
473 break
474
474
475 if fd:
475 if fd:
476 return fd.write(data)
476 return fd.write(data)
477 return self.wopener(filename, 'w').write(data)
477 return self.wopener(filename, 'w').write(data)
478
478
479 def transaction(self):
479 def transaction(self):
480 tr = self.transhandle
480 tr = self.transhandle
481 if tr != None and tr.running():
481 if tr != None and tr.running():
482 return tr.nest()
482 return tr.nest()
483
483
484 # save dirstate for rollback
484 # save dirstate for rollback
485 try:
485 try:
486 ds = self.opener("dirstate").read()
486 ds = self.opener("dirstate").read()
487 except IOError:
487 except IOError:
488 ds = ""
488 ds = ""
489 self.opener("journal.dirstate", "w").write(ds)
489 self.opener("journal.dirstate", "w").write(ds)
490
490
491 renames = [(self.sjoin("journal"), self.sjoin("undo")),
491 renames = [(self.sjoin("journal"), self.sjoin("undo")),
492 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
492 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
493 tr = transaction.transaction(self.ui.warn, self.sopener,
493 tr = transaction.transaction(self.ui.warn, self.sopener,
494 self.sjoin("journal"),
494 self.sjoin("journal"),
495 aftertrans(renames))
495 aftertrans(renames))
496 self.transhandle = tr
496 self.transhandle = tr
497 return tr
497 return tr
498
498
499 def recover(self):
499 def recover(self):
500 l = self.lock()
500 l = self.lock()
501 if os.path.exists(self.sjoin("journal")):
501 if os.path.exists(self.sjoin("journal")):
502 self.ui.status(_("rolling back interrupted transaction\n"))
502 self.ui.status(_("rolling back interrupted transaction\n"))
503 transaction.rollback(self.sopener, self.sjoin("journal"))
503 transaction.rollback(self.sopener, self.sjoin("journal"))
504 self.reload()
504 self.reload()
505 return True
505 return True
506 else:
506 else:
507 self.ui.warn(_("no interrupted transaction available\n"))
507 self.ui.warn(_("no interrupted transaction available\n"))
508 return False
508 return False
509
509
510 def rollback(self, wlock=None):
510 def rollback(self, wlock=None):
511 if not wlock:
511 if not wlock:
512 wlock = self.wlock()
512 wlock = self.wlock()
513 l = self.lock()
513 l = self.lock()
514 if os.path.exists(self.sjoin("undo")):
514 if os.path.exists(self.sjoin("undo")):
515 self.ui.status(_("rolling back last transaction\n"))
515 self.ui.status(_("rolling back last transaction\n"))
516 transaction.rollback(self.sopener, self.sjoin("undo"))
516 transaction.rollback(self.sopener, self.sjoin("undo"))
517 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
517 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
518 self.reload()
518 self.reload()
519 self.wreload()
519 self.wreload()
520 else:
520 else:
521 self.ui.warn(_("no rollback information available\n"))
521 self.ui.warn(_("no rollback information available\n"))
522
522
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
525
525
    def reload(self):
        # re-read store data and drop the caches derived from it
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
531
531
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        # Acquire the lock file lockname.  With wait true, retry with a
        # timeout (ui.timeout, default 600s) when the lock is held
        # elsewhere; otherwise lock.LockHeld propagates immediately.
        # releasefn/acquirefn are callbacks run on release/acquisition.
        try:
            # first attempt: zero timeout, fail fast if held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
547
547
    def lock(self, wait=1):
        # repository (store) lock; caches are reloaded once acquired so
        # we see any changes committed by the previous lock holder
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
551
551
    def wlock(self, wait=1):
        # working directory lock; the dirstate is written back when the
        # lock is released and re-read when it is acquired
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
556
556
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node for fn: the existing parent node when
        the file content is unchanged, otherwise a newly added revision
        (in which case fn is appended to changelist).
        """

        t = self.wread(fn)
        fl = self.file(fn)
        # file revisions recorded in each parent manifest (nullid if absent)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # the file is a copy/rename; record the source and the source
            # revision, and normalize the parents accordingly
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                # NOTE(review): no default here — assumes cp is present in
                # manifest2 in this branch; confirm against callers
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
598
598
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        # commit exactly `files` against explicit parents, defaulting to
        # the dirstate parents when p1 is not supplied; thin wrapper
        # around commit() (commit treats p1 != None as "rawcommit mode")
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock)
604
604
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Commit changes and return the new changeset node (or None).

        With p1 unset the dirstate supplies the parents and the file
        lists ("normal" commit); with p1 set this behaves like
        rawcommit and commits exactly `files` against the given
        parents.  An editor is launched when no text is given or
        force_editor is true.  Returns None when there is nothing to
        commit or the commit message ends up empty.
        NOTE: the mutable default for `extra` is safe because it is
        copied immediately below.
        """

        commit = []        # files to check in
        remove = []        # files to drop from the manifest
        changed = []       # files that actually got a new filelog revision
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        # build the commit/remove lists
        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate forward if it already sits on p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = util.fromlocal(self.workingctx().branch())
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # bail out early when nothing changed (no files, no merge,
            # no branch name change) unless forced
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
            except IOError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: an unreadable file is treated as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()

        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
        if use_dirstate:
            self.dirstate.update(new, "n")
            self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
740
740
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walking a committed revision: match against its manifest
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        # deleting during iteration is safe only because
                        # we break out of the loop immediately after
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was requested but not found
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # walking the working directory: delegate to the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
774
774
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        ignored and clean are only populated when the corresponding
        list_* flag is set.
        """

        def fcmp(fn, mf):
            # true if the working copy of fn differs from its entry in mf
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of `node`, restricted to files accepted by `match`
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # best effort: without the lock we just skip the
                    # dirstate fixups below
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record that the file is clean so we do
                                # not re-compare it next time
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # "" as the mf2 entry marks a working-dir pseudo
                    # entry whose content must be compared on disk
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # anything left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
871
871
872 def add(self, list, wlock=None):
872 def add(self, list, wlock=None):
873 if not wlock:
873 if not wlock:
874 wlock = self.wlock()
874 wlock = self.wlock()
875 for f in list:
875 for f in list:
876 p = self.wjoin(f)
876 p = self.wjoin(f)
877 if not os.path.exists(p):
877 if not os.path.exists(p):
878 self.ui.warn(_("%s does not exist!\n") % f)
878 self.ui.warn(_("%s does not exist!\n") % f)
879 elif not os.path.isfile(p):
879 elif not os.path.isfile(p):
880 self.ui.warn(_("%s not added: only files supported currently\n")
880 self.ui.warn(_("%s not added: only files supported currently\n")
881 % f)
881 % f)
882 elif self.dirstate.state(f) in 'an':
882 elif self.dirstate.state(f) in 'an':
883 self.ui.warn(_("%s already tracked!\n") % f)
883 self.ui.warn(_("%s already tracked!\n") % f)
884 else:
884 else:
885 self.dirstate.update([f], "a")
885 self.dirstate.update([f], "a")
886
886
887 def forget(self, list, wlock=None):
887 def forget(self, list, wlock=None):
888 if not wlock:
888 if not wlock:
889 wlock = self.wlock()
889 wlock = self.wlock()
890 for f in list:
890 for f in list:
891 if self.dirstate.state(f) not in 'ai':
891 if self.dirstate.state(f) not in 'ai':
892 self.ui.warn(_("%s not added!\n") % f)
892 self.ui.warn(_("%s not added!\n") % f)
893 else:
893 else:
894 self.dirstate.forget([f])
894 self.dirstate.forget([f])
895
895
896 def remove(self, list, unlink=False, wlock=None):
896 def remove(self, list, unlink=False, wlock=None):
897 if unlink:
897 if unlink:
898 for f in list:
898 for f in list:
899 try:
899 try:
900 util.unlink(self.wjoin(f))
900 util.unlink(self.wjoin(f))
901 except OSError, inst:
901 except OSError, inst:
902 if inst.errno != errno.ENOENT:
902 if inst.errno != errno.ENOENT:
903 raise
903 raise
904 if not wlock:
904 if not wlock:
905 wlock = self.wlock()
905 wlock = self.wlock()
906 for f in list:
906 for f in list:
907 p = self.wjoin(f)
907 p = self.wjoin(f)
908 if os.path.exists(p):
908 if os.path.exists(p):
909 self.ui.warn(_("%s still exists!\n") % f)
909 self.ui.warn(_("%s still exists!\n") % f)
910 elif self.dirstate.state(f) == 'a':
910 elif self.dirstate.state(f) == 'a':
911 self.dirstate.forget([f])
911 self.dirstate.forget([f])
912 elif f not in self.dirstate:
912 elif f not in self.dirstate:
913 self.ui.warn(_("%s not tracked!\n") % f)
913 self.ui.warn(_("%s not tracked!\n") % f)
914 else:
914 else:
915 self.dirstate.update([f], "r")
915 self.dirstate.update([f], "r")
916
916
917 def undelete(self, list, wlock=None):
917 def undelete(self, list, wlock=None):
918 p = self.dirstate.parents()[0]
918 p = self.dirstate.parents()[0]
919 mn = self.changelog.read(p)[0]
919 mn = self.changelog.read(p)[0]
920 m = self.manifest.read(mn)
920 m = self.manifest.read(mn)
921 if not wlock:
921 if not wlock:
922 wlock = self.wlock()
922 wlock = self.wlock()
923 for f in list:
923 for f in list:
924 if self.dirstate.state(f) not in "r":
924 if self.dirstate.state(f) not in "r":
925 self.ui.warn("%s not removed!\n" % f)
925 self.ui.warn("%s not removed!\n" % f)
926 else:
926 else:
927 t = self.file(f).read(m[f])
927 t = self.file(f).read(m[f])
928 self.wwrite(f, t)
928 self.wwrite(f, t)
929 util.set_exec(self.wjoin(f), m.execf(f))
929 util.set_exec(self.wjoin(f), m.execf(f))
930 self.dirstate.update([f], "n")
930 self.dirstate.update([f], "n")
931
931
932 def copy(self, source, dest, wlock=None):
932 def copy(self, source, dest, wlock=None):
933 p = self.wjoin(dest)
933 p = self.wjoin(dest)
934 if not os.path.exists(p):
934 if not os.path.exists(p):
935 self.ui.warn(_("%s does not exist!\n") % dest)
935 self.ui.warn(_("%s does not exist!\n") % dest)
936 elif not os.path.isfile(p):
936 elif not os.path.isfile(p):
937 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
937 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
938 else:
938 else:
939 if not wlock:
939 if not wlock:
940 wlock = self.wlock()
940 wlock = self.wlock()
941 if self.dirstate.state(dest) == '?':
941 if self.dirstate.state(dest) == '?':
942 self.dirstate.update([dest], "a")
942 self.dirstate.update([dest], "a")
943 self.dirstate.copy(source, dest)
943 self.dirstate.copy(source, dest)
944
944
945 def heads(self, start=None):
945 def heads(self, start=None):
946 heads = self.changelog.heads(start)
946 heads = self.changelog.heads(start)
947 # sort the output in rev descending order
947 # sort the output in rev descending order
948 heads = [(-self.changelog.rev(h), h) for h in heads]
948 heads = [(-self.changelog.rev(h), h) for h in heads]
949 heads.sort()
949 heads.sort()
950 return [n for (r, n) in heads]
950 return [n for (r, n) in heads]
951
951
    # branchlookup returns a dict giving a list of branches for
    # each head. A branch is defined as the tag of a node or
    # the branch of the node's parents. If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph: a->b->c->d->e
    #                      \        /
    #                       aa ----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13. Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # Callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a specific
    # branch).
    #
    # Passing in a specific branch will limit the depth of the search
    # through the parents. It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Return a dict mapping each head to the list of branch tags
        visible from it (see the comment block above for the rules)."""
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}      # node -> {tagged node visible from it: 1}
        merges = []        # pending (second parent, found-so-far) pairs
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        # n is tagged: it is visible from everything
                        # found so far, and from itself
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # reached the requested branch: stop descending
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the merge's second parent for a later pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # transitive closure of `branches` from node, memoized
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1057
1057
1058 def branches(self, nodes):
1058 def branches(self, nodes):
1059 if not nodes:
1059 if not nodes:
1060 nodes = [self.changelog.tip()]
1060 nodes = [self.changelog.tip()]
1061 b = []
1061 b = []
1062 for n in nodes:
1062 for n in nodes:
1063 t = n
1063 t = n
1064 while 1:
1064 while 1:
1065 p = self.changelog.parents(n)
1065 p = self.changelog.parents(n)
1066 if p[1] != nullid or p[0] == nullid:
1066 if p[1] != nullid or p[0] == nullid:
1067 b.append((t, n, p[0], p[1]))
1067 b.append((t, n, p[0], p[1]))
1068 break
1068 break
1069 n = p[0]
1069 n = p[0]
1070 return b
1070 return b
1071
1071
1072 def between(self, pairs):
1072 def between(self, pairs):
1073 r = []
1073 r = []
1074
1074
1075 for top, bottom in pairs:
1075 for top, bottom in pairs:
1076 n, l, i = top, [], 0
1076 n, l, i = top, [], 0
1077 f = 1
1077 f = 1
1078
1078
1079 while n != bottom:
1079 while n != bottom:
1080 p = self.changelog.parents(n)[0]
1080 p = self.changelog.parents(n)[0]
1081 if i == f:
1081 if i == f:
1082 l.append(n)
1082 l.append(n)
1083 f = f * 2
1083 f = f * 2
1084 n = p
1084 n = p
1085 i += 1
1085 i += 1
1086
1086
1087 r.append(l)
1087 r.append(l)
1088
1088
1089 return r
1089 return r
1090
1090
1091 def findincoming(self, remote, base=None, heads=None, force=False):
1091 def findincoming(self, remote, base=None, heads=None, force=False):
1092 """Return list of roots of the subsets of missing nodes from remote
1092 """Return list of roots of the subsets of missing nodes from remote
1093
1093
1094 If base dict is specified, assume that these nodes and their parents
1094 If base dict is specified, assume that these nodes and their parents
1095 exist on the remote side and that no child of a node of base exists
1095 exist on the remote side and that no child of a node of base exists
1096 in both remote and self.
1096 in both remote and self.
1097 Furthermore base will be updated to include the nodes that exists
1097 Furthermore base will be updated to include the nodes that exists
1098 in self and remote but no children exists in self and remote.
1098 in self and remote but no children exists in self and remote.
1099 If a list of heads is specified, return only nodes which are heads
1099 If a list of heads is specified, return only nodes which are heads
1100 or ancestors of these heads.
1100 or ancestors of these heads.
1101
1101
1102 All the ancestors of base are in self and in remote.
1102 All the ancestors of base are in self and in remote.
1103 All the descendants of the list returned are missing in self.
1103 All the descendants of the list returned are missing in self.
1104 (and so we know that the rest of the nodes are missing in remote, see
1104 (and so we know that the rest of the nodes are missing in remote, see
1105 outgoing)
1105 outgoing)
1106 """
1106 """
1107 m = self.changelog.nodemap
1107 m = self.changelog.nodemap
1108 search = []
1108 search = []
1109 fetch = {}
1109 fetch = {}
1110 seen = {}
1110 seen = {}
1111 seenbranch = {}
1111 seenbranch = {}
1112 if base == None:
1112 if base == None:
1113 base = {}
1113 base = {}
1114
1114
1115 if not heads:
1115 if not heads:
1116 heads = remote.heads()
1116 heads = remote.heads()
1117
1117
1118 if self.changelog.tip() == nullid:
1118 if self.changelog.tip() == nullid:
1119 base[nullid] = 1
1119 base[nullid] = 1
1120 if heads != [nullid]:
1120 if heads != [nullid]:
1121 return [nullid]
1121 return [nullid]
1122 return []
1122 return []
1123
1123
1124 # assume we're closer to the tip than the root
1124 # assume we're closer to the tip than the root
1125 # and start by examining the heads
1125 # and start by examining the heads
1126 self.ui.status(_("searching for changes\n"))
1126 self.ui.status(_("searching for changes\n"))
1127
1127
1128 unknown = []
1128 unknown = []
1129 for h in heads:
1129 for h in heads:
1130 if h not in m:
1130 if h not in m:
1131 unknown.append(h)
1131 unknown.append(h)
1132 else:
1132 else:
1133 base[h] = 1
1133 base[h] = 1
1134
1134
1135 if not unknown:
1135 if not unknown:
1136 return []
1136 return []
1137
1137
1138 req = dict.fromkeys(unknown)
1138 req = dict.fromkeys(unknown)
1139 reqcnt = 0
1139 reqcnt = 0
1140
1140
1141 # search through remote branches
1141 # search through remote branches
1142 # a 'branch' here is a linear segment of history, with four parts:
1142 # a 'branch' here is a linear segment of history, with four parts:
1143 # head, root, first parent, second parent
1143 # head, root, first parent, second parent
1144 # (a branch always has two parents (or none) by definition)
1144 # (a branch always has two parents (or none) by definition)
1145 unknown = remote.branches(unknown)
1145 unknown = remote.branches(unknown)
1146 while unknown:
1146 while unknown:
1147 r = []
1147 r = []
1148 while unknown:
1148 while unknown:
1149 n = unknown.pop(0)
1149 n = unknown.pop(0)
1150 if n[0] in seen:
1150 if n[0] in seen:
1151 continue
1151 continue
1152
1152
1153 self.ui.debug(_("examining %s:%s\n")
1153 self.ui.debug(_("examining %s:%s\n")
1154 % (short(n[0]), short(n[1])))
1154 % (short(n[0]), short(n[1])))
1155 if n[0] == nullid: # found the end of the branch
1155 if n[0] == nullid: # found the end of the branch
1156 pass
1156 pass
1157 elif n in seenbranch:
1157 elif n in seenbranch:
1158 self.ui.debug(_("branch already found\n"))
1158 self.ui.debug(_("branch already found\n"))
1159 continue
1159 continue
1160 elif n[1] and n[1] in m: # do we know the base?
1160 elif n[1] and n[1] in m: # do we know the base?
1161 self.ui.debug(_("found incomplete branch %s:%s\n")
1161 self.ui.debug(_("found incomplete branch %s:%s\n")
1162 % (short(n[0]), short(n[1])))
1162 % (short(n[0]), short(n[1])))
1163 search.append(n) # schedule branch range for scanning
1163 search.append(n) # schedule branch range for scanning
1164 seenbranch[n] = 1
1164 seenbranch[n] = 1
1165 else:
1165 else:
1166 if n[1] not in seen and n[1] not in fetch:
1166 if n[1] not in seen and n[1] not in fetch:
1167 if n[2] in m and n[3] in m:
1167 if n[2] in m and n[3] in m:
1168 self.ui.debug(_("found new changeset %s\n") %
1168 self.ui.debug(_("found new changeset %s\n") %
1169 short(n[1]))
1169 short(n[1]))
1170 fetch[n[1]] = 1 # earliest unknown
1170 fetch[n[1]] = 1 # earliest unknown
1171 for p in n[2:4]:
1171 for p in n[2:4]:
1172 if p in m:
1172 if p in m:
1173 base[p] = 1 # latest known
1173 base[p] = 1 # latest known
1174
1174
1175 for p in n[2:4]:
1175 for p in n[2:4]:
1176 if p not in req and p not in m:
1176 if p not in req and p not in m:
1177 r.append(p)
1177 r.append(p)
1178 req[p] = 1
1178 req[p] = 1
1179 seen[n[0]] = 1
1179 seen[n[0]] = 1
1180
1180
1181 if r:
1181 if r:
1182 reqcnt += 1
1182 reqcnt += 1
1183 self.ui.debug(_("request %d: %s\n") %
1183 self.ui.debug(_("request %d: %s\n") %
1184 (reqcnt, " ".join(map(short, r))))
1184 (reqcnt, " ".join(map(short, r))))
1185 for p in xrange(0, len(r), 10):
1185 for p in xrange(0, len(r), 10):
1186 for b in remote.branches(r[p:p+10]):
1186 for b in remote.branches(r[p:p+10]):
1187 self.ui.debug(_("received %s:%s\n") %
1187 self.ui.debug(_("received %s:%s\n") %
1188 (short(b[0]), short(b[1])))
1188 (short(b[0]), short(b[1])))
1189 unknown.append(b)
1189 unknown.append(b)
1190
1190
1191 # do binary search on the branches we found
1191 # do binary search on the branches we found
1192 while search:
1192 while search:
1193 n = search.pop(0)
1193 n = search.pop(0)
1194 reqcnt += 1
1194 reqcnt += 1
1195 l = remote.between([(n[0], n[1])])[0]
1195 l = remote.between([(n[0], n[1])])[0]
1196 l.append(n[1])
1196 l.append(n[1])
1197 p = n[0]
1197 p = n[0]
1198 f = 1
1198 f = 1
1199 for i in l:
1199 for i in l:
1200 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1200 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1201 if i in m:
1201 if i in m:
1202 if f <= 2:
1202 if f <= 2:
1203 self.ui.debug(_("found new branch changeset %s\n") %
1203 self.ui.debug(_("found new branch changeset %s\n") %
1204 short(p))
1204 short(p))
1205 fetch[p] = 1
1205 fetch[p] = 1
1206 base[i] = 1
1206 base[i] = 1
1207 else:
1207 else:
1208 self.ui.debug(_("narrowed branch search to %s:%s\n")
1208 self.ui.debug(_("narrowed branch search to %s:%s\n")
1209 % (short(p), short(i)))
1209 % (short(p), short(i)))
1210 search.append((p, i))
1210 search.append((p, i))
1211 break
1211 break
1212 p, f = i, f * 2
1212 p, f = i, f * 2
1213
1213
1214 # sanity check our fetch list
1214 # sanity check our fetch list
1215 for f in fetch.keys():
1215 for f in fetch.keys():
1216 if f in m:
1216 if f in m:
1217 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1217 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1218
1218
1219 if base.keys() == [nullid]:
1219 if base.keys() == [nullid]:
1220 if force:
1220 if force:
1221 self.ui.warn(_("warning: repository is unrelated\n"))
1221 self.ui.warn(_("warning: repository is unrelated\n"))
1222 else:
1222 else:
1223 raise util.Abort(_("repository is unrelated"))
1223 raise util.Abort(_("repository is unrelated"))
1224
1224
1225 self.ui.debug(_("found new changesets starting at ") +
1225 self.ui.debug(_("found new changesets starting at ") +
1226 " ".join([short(f) for f in fetch]) + "\n")
1226 " ".join([short(f) for f in fetch]) + "\n")
1227
1227
1228 self.ui.debug(_("%d total queries\n") % reqcnt)
1228 self.ui.debug(_("%d total queries\n") % reqcnt)
1229
1229
1230 return fetch.keys()
1230 return fetch.keys()
1231
1231
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node map supplied by the caller: compute it now
            # (callers like prepush pass in the base they already built,
            # in which case findincoming must not run a second time)
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start with every local node as a candidate for pushing
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        # breadth-first walk from the common nodes down to the roots;
        # base.keys() is a list under Python 2, used here as a work queue
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1280 def pull(self, remote, heads=None, force=False, lock=None):
1280 def pull(self, remote, heads=None, force=False, lock=None):
1281 mylock = False
1281 mylock = False
1282 if not lock:
1282 if not lock:
1283 lock = self.lock()
1283 lock = self.lock()
1284 mylock = True
1284 mylock = True
1285
1285
1286 try:
1286 try:
1287 fetch = self.findincoming(remote, force=force)
1287 fetch = self.findincoming(remote, force=force)
1288 if fetch == [nullid]:
1288 if fetch == [nullid]:
1289 self.ui.status(_("requesting all changes\n"))
1289 self.ui.status(_("requesting all changes\n"))
1290
1290
1291 if not fetch:
1291 if not fetch:
1292 self.ui.status(_("no changes found\n"))
1292 self.ui.status(_("no changes found\n"))
1293 return 0
1293 return 0
1294
1294
1295 if heads is None:
1295 if heads is None:
1296 cg = remote.changegroup(fetch, 'pull')
1296 cg = remote.changegroup(fetch, 'pull')
1297 else:
1297 else:
1298 if 'changegroupsubset' not in remote.capabilities:
1298 if 'changegroupsubset' not in remote.capabilities:
1299 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1299 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1300 cg = remote.changegroupsubset(fetch, heads, 'pull')
1300 cg = remote.changegroupsubset(fetch, heads, 'pull')
1301 return self.addchangegroup(cg, 'pull', remote.url())
1301 return self.addchangegroup(cg, 'pull', remote.url())
1302 finally:
1302 finally:
1303 if mylock:
1303 if mylock:
1304 lock.release()
1304 lock.release()
1305
1305
1306 def push(self, remote, force=False, revs=None):
1306 def push(self, remote, force=False, revs=None):
1307 # there are two ways to push to remote repo:
1307 # there are two ways to push to remote repo:
1308 #
1308 #
1309 # addchangegroup assumes local user can lock remote
1309 # addchangegroup assumes local user can lock remote
1310 # repo (local filesystem, old ssh servers).
1310 # repo (local filesystem, old ssh servers).
1311 #
1311 #
1312 # unbundle assumes local user cannot lock remote repo (new ssh
1312 # unbundle assumes local user cannot lock remote repo (new ssh
1313 # servers, http servers).
1313 # servers, http servers).
1314
1314
1315 if remote.capable('unbundle'):
1315 if remote.capable('unbundle'):
1316 return self.push_unbundle(remote, force, revs)
1316 return self.push_unbundle(remote, force, revs)
1317 return self.push_addchangegroup(remote, force, revs)
1317 return self.push_addchangegroup(remote, force, revs)
1318
1318
    def prepush(self, remote, force, revs):
        # Shared setup for both push transports: work out what each side
        # is missing and build the changegroup to send.  Returns
        # (changegroup, remote_heads) on success, or (None, status) when
        # there is nothing to push or the push would create new remote
        # heads without force.
        base = {}
        remote_heads = remote.heads()
        # nodes present remotely but not locally (unsynced changes)
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the given revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: any heads we push are fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything and we have more heads than remote
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # NOTE(review): assumes changelog.heads(r) lists
                        # the local heads descending from r — confirm
                        # against the changelog.heads signature
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no pushed head descends from r, so r stays
                            # a head after the push
                            newheads.append(r)
                    else:
                        # r unknown locally: it remains a remote head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1375 def push_addchangegroup(self, remote, force, revs):
1375 def push_addchangegroup(self, remote, force, revs):
1376 lock = remote.lock()
1376 lock = remote.lock()
1377
1377
1378 ret = self.prepush(remote, force, revs)
1378 ret = self.prepush(remote, force, revs)
1379 if ret[0] is not None:
1379 if ret[0] is not None:
1380 cg, remote_heads = ret
1380 cg, remote_heads = ret
1381 return remote.addchangegroup(cg, 'push', self.url())
1381 return remote.addchangegroup(cg, 'push', self.url())
1382 return ret[1]
1382 return ret[1]
1383
1383
1384 def push_unbundle(self, remote, force, revs):
1384 def push_unbundle(self, remote, force, revs):
1385 # local repo finds heads on server, finds out what revs it
1385 # local repo finds heads on server, finds out what revs it
1386 # must push. once revs transferred, if server finds it has
1386 # must push. once revs transferred, if server finds it has
1387 # different heads (someone else won commit/push race), server
1387 # different heads (someone else won commit/push race), server
1388 # aborts.
1388 # aborts.
1389
1389
1390 ret = self.prepush(remote, force, revs)
1390 ret = self.prepush(remote, force, revs)
1391 if ret[0] is not None:
1391 if ret[0] is not None:
1392 cg, remote_heads = ret
1392 cg, remote_heads = ret
1393 if force: remote_heads = ['force']
1393 if force: remote_heads = ['force']
1394 return remote.unbundle(cg, remote_heads, 'push')
1394 return remote.unbundle(cg, remote_heads, 'push')
1395 return ret[1]
1395 return ret[1]
1396
1396
1397 def changegroupinfo(self, nodes):
1397 def changegroupinfo(self, nodes):
1398 self.ui.note(_("%d changesets found\n") % len(nodes))
1398 self.ui.note(_("%d changesets found\n") % len(nodes))
1399 if self.ui.debugflag:
1399 if self.ui.debugflag:
1400 self.ui.debug(_("List of changesets:\n"))
1400 self.ui.debug(_("List of changesets:\n"))
1401 for node in nodes:
1401 for node in nodes:
1402 self.ui.debug("%s\n" % hex(node))
1402 self.ui.debug("%s\n" % hex(node))
1403
1403
1404 def changegroupsubset(self, bases, heads, source):
1404 def changegroupsubset(self, bases, heads, source):
1405 """This function generates a changegroup consisting of all the nodes
1405 """This function generates a changegroup consisting of all the nodes
1406 that are descendents of any of the bases, and ancestors of any of
1406 that are descendents of any of the bases, and ancestors of any of
1407 the heads.
1407 the heads.
1408
1408
1409 It is fairly complex as determining which filenodes and which
1409 It is fairly complex as determining which filenodes and which
1410 manifest nodes need to be included for the changeset to be complete
1410 manifest nodes need to be included for the changeset to be complete
1411 is non-trivial.
1411 is non-trivial.
1412
1412
1413 Another wrinkle is doing the reverse, figuring out which changeset in
1413 Another wrinkle is doing the reverse, figuring out which changeset in
1414 the changegroup a particular filenode or manifestnode belongs to."""
1414 the changegroup a particular filenode or manifestnode belongs to."""
1415
1415
1416 self.hook('preoutgoing', throw=True, source=source)
1416 self.hook('preoutgoing', throw=True, source=source)
1417
1417
1418 # Set up some initial variables
1418 # Set up some initial variables
1419 # Make it easy to refer to self.changelog
1419 # Make it easy to refer to self.changelog
1420 cl = self.changelog
1420 cl = self.changelog
1421 # msng is short for missing - compute the list of changesets in this
1421 # msng is short for missing - compute the list of changesets in this
1422 # changegroup.
1422 # changegroup.
1423 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1423 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1424 self.changegroupinfo(msng_cl_lst)
1424 self.changegroupinfo(msng_cl_lst)
1425 # Some bases may turn out to be superfluous, and some heads may be
1425 # Some bases may turn out to be superfluous, and some heads may be
1426 # too. nodesbetween will return the minimal set of bases and heads
1426 # too. nodesbetween will return the minimal set of bases and heads
1427 # necessary to re-create the changegroup.
1427 # necessary to re-create the changegroup.
1428
1428
1429 # Known heads are the list of heads that it is assumed the recipient
1429 # Known heads are the list of heads that it is assumed the recipient
1430 # of this changegroup will know about.
1430 # of this changegroup will know about.
1431 knownheads = {}
1431 knownheads = {}
1432 # We assume that all parents of bases are known heads.
1432 # We assume that all parents of bases are known heads.
1433 for n in bases:
1433 for n in bases:
1434 for p in cl.parents(n):
1434 for p in cl.parents(n):
1435 if p != nullid:
1435 if p != nullid:
1436 knownheads[p] = 1
1436 knownheads[p] = 1
1437 knownheads = knownheads.keys()
1437 knownheads = knownheads.keys()
1438 if knownheads:
1438 if knownheads:
1439 # Now that we know what heads are known, we can compute which
1439 # Now that we know what heads are known, we can compute which
1440 # changesets are known. The recipient must know about all
1440 # changesets are known. The recipient must know about all
1441 # changesets required to reach the known heads from the null
1441 # changesets required to reach the known heads from the null
1442 # changeset.
1442 # changeset.
1443 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1443 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1444 junk = None
1444 junk = None
1445 # Transform the list into an ersatz set.
1445 # Transform the list into an ersatz set.
1446 has_cl_set = dict.fromkeys(has_cl_set)
1446 has_cl_set = dict.fromkeys(has_cl_set)
1447 else:
1447 else:
1448 # If there were no known heads, the recipient cannot be assumed to
1448 # If there were no known heads, the recipient cannot be assumed to
1449 # know about any changesets.
1449 # know about any changesets.
1450 has_cl_set = {}
1450 has_cl_set = {}
1451
1451
1452 # Make it easy to refer to self.manifest
1452 # Make it easy to refer to self.manifest
1453 mnfst = self.manifest
1453 mnfst = self.manifest
1454 # We don't know which manifests are missing yet
1454 # We don't know which manifests are missing yet
1455 msng_mnfst_set = {}
1455 msng_mnfst_set = {}
1456 # Nor do we know which filenodes are missing.
1456 # Nor do we know which filenodes are missing.
1457 msng_filenode_set = {}
1457 msng_filenode_set = {}
1458
1458
1459 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1459 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1460 junk = None
1460 junk = None
1461
1461
1462 # A changeset always belongs to itself, so the changenode lookup
1462 # A changeset always belongs to itself, so the changenode lookup
1463 # function for a changenode is identity.
1463 # function for a changenode is identity.
1464 def identity(x):
1464 def identity(x):
1465 return x
1465 return x
1466
1466
1467 # A function generating function. Sets up an environment for the
1467 # A function generating function. Sets up an environment for the
1468 # inner function.
1468 # inner function.
1469 def cmp_by_rev_func(revlog):
1469 def cmp_by_rev_func(revlog):
1470 # Compare two nodes by their revision number in the environment's
1470 # Compare two nodes by their revision number in the environment's
1471 # revision history. Since the revision number both represents the
1471 # revision history. Since the revision number both represents the
1472 # most efficient order to read the nodes in, and represents a
1472 # most efficient order to read the nodes in, and represents a
1473 # topological sorting of the nodes, this function is often useful.
1473 # topological sorting of the nodes, this function is often useful.
1474 def cmp_by_rev(a, b):
1474 def cmp_by_rev(a, b):
1475 return cmp(revlog.rev(a), revlog.rev(b))
1475 return cmp(revlog.rev(a), revlog.rev(b))
1476 return cmp_by_rev
1476 return cmp_by_rev
1477
1477
1478 # If we determine that a particular file or manifest node must be a
1478 # If we determine that a particular file or manifest node must be a
1479 # node that the recipient of the changegroup will already have, we can
1479 # node that the recipient of the changegroup will already have, we can
1480 # also assume the recipient will have all the parents. This function
1480 # also assume the recipient will have all the parents. This function
1481 # prunes them from the set of missing nodes.
1481 # prunes them from the set of missing nodes.
1482 def prune_parents(revlog, hasset, msngset):
1482 def prune_parents(revlog, hasset, msngset):
1483 haslst = hasset.keys()
1483 haslst = hasset.keys()
1484 haslst.sort(cmp_by_rev_func(revlog))
1484 haslst.sort(cmp_by_rev_func(revlog))
1485 for node in haslst:
1485 for node in haslst:
1486 parentlst = [p for p in revlog.parents(node) if p != nullid]
1486 parentlst = [p for p in revlog.parents(node) if p != nullid]
1487 while parentlst:
1487 while parentlst:
1488 n = parentlst.pop()
1488 n = parentlst.pop()
1489 if n not in hasset:
1489 if n not in hasset:
1490 hasset[n] = 1
1490 hasset[n] = 1
1491 p = [p for p in revlog.parents(n) if p != nullid]
1491 p = [p for p in revlog.parents(n) if p != nullid]
1492 parentlst.extend(p)
1492 parentlst.extend(p)
1493 for n in hasset:
1493 for n in hasset:
1494 msngset.pop(n, None)
1494 msngset.pop(n, None)
1495
1495
1496 # This is a function generating function used to set up an environment
1496 # This is a function generating function used to set up an environment
1497 # for the inner function to execute in.
1497 # for the inner function to execute in.
1498 def manifest_and_file_collector(changedfileset):
1498 def manifest_and_file_collector(changedfileset):
1499 # This is an information gathering function that gathers
1499 # This is an information gathering function that gathers
1500 # information from each changeset node that goes out as part of
1500 # information from each changeset node that goes out as part of
1501 # the changegroup. The information gathered is a list of which
1501 # the changegroup. The information gathered is a list of which
1502 # manifest nodes are potentially required (the recipient may
1502 # manifest nodes are potentially required (the recipient may
1503 # already have them) and total list of all files which were
1503 # already have them) and total list of all files which were
1504 # changed in any changeset in the changegroup.
1504 # changed in any changeset in the changegroup.
1505 #
1505 #
1506 # We also remember the first changenode we saw any manifest
1506 # We also remember the first changenode we saw any manifest
1507 # referenced by so we can later determine which changenode 'owns'
1507 # referenced by so we can later determine which changenode 'owns'
1508 # the manifest.
1508 # the manifest.
1509 def collect_manifests_and_files(clnode):
1509 def collect_manifests_and_files(clnode):
1510 c = cl.read(clnode)
1510 c = cl.read(clnode)
1511 for f in c[3]:
1511 for f in c[3]:
1512 # This is to make sure we only have one instance of each
1512 # This is to make sure we only have one instance of each
1513 # filename string for each filename.
1513 # filename string for each filename.
1514 changedfileset.setdefault(f, f)
1514 changedfileset.setdefault(f, f)
1515 msng_mnfst_set.setdefault(c[0], clnode)
1515 msng_mnfst_set.setdefault(c[0], clnode)
1516 return collect_manifests_and_files
1516 return collect_manifests_and_files
1517
1517
1518 # Figure out which manifest nodes (of the ones we think might be part
1518 # Figure out which manifest nodes (of the ones we think might be part
1519 # of the changegroup) the recipient must know about and remove them
1519 # of the changegroup) the recipient must know about and remove them
1520 # from the changegroup.
1520 # from the changegroup.
1521 def prune_manifests():
1521 def prune_manifests():
1522 has_mnfst_set = {}
1522 has_mnfst_set = {}
1523 for n in msng_mnfst_set:
1523 for n in msng_mnfst_set:
1524 # If a 'missing' manifest thinks it belongs to a changenode
1524 # If a 'missing' manifest thinks it belongs to a changenode
1525 # the recipient is assumed to have, obviously the recipient
1525 # the recipient is assumed to have, obviously the recipient
1526 # must have that manifest.
1526 # must have that manifest.
1527 linknode = cl.node(mnfst.linkrev(n))
1527 linknode = cl.node(mnfst.linkrev(n))
1528 if linknode in has_cl_set:
1528 if linknode in has_cl_set:
1529 has_mnfst_set[n] = 1
1529 has_mnfst_set[n] = 1
1530 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1530 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1531
1531
1532 # Use the information collected in collect_manifests_and_files to say
1532 # Use the information collected in collect_manifests_and_files to say
1533 # which changenode any manifestnode belongs to.
1533 # which changenode any manifestnode belongs to.
1534 def lookup_manifest_link(mnfstnode):
1534 def lookup_manifest_link(mnfstnode):
1535 return msng_mnfst_set[mnfstnode]
1535 return msng_mnfst_set[mnfstnode]
1536
1536
1537 # A function generating function that sets up the initial environment
1537 # A function generating function that sets up the initial environment
1538 # the inner function.
1538 # the inner function.
1539 def filenode_collector(changedfiles):
1539 def filenode_collector(changedfiles):
1540 next_rev = [0]
1540 next_rev = [0]
1541 # This gathers information from each manifestnode included in the
1541 # This gathers information from each manifestnode included in the
1542 # changegroup about which filenodes the manifest node references
1542 # changegroup about which filenodes the manifest node references
1543 # so we can include those in the changegroup too.
1543 # so we can include those in the changegroup too.
1544 #
1544 #
1545 # It also remembers which changenode each filenode belongs to. It
1545 # It also remembers which changenode each filenode belongs to. It
1546 # does this by assuming the a filenode belongs to the changenode
1546 # does this by assuming the a filenode belongs to the changenode
1547 # the first manifest that references it belongs to.
1547 # the first manifest that references it belongs to.
1548 def collect_msng_filenodes(mnfstnode):
1548 def collect_msng_filenodes(mnfstnode):
1549 r = mnfst.rev(mnfstnode)
1549 r = mnfst.rev(mnfstnode)
1550 if r == next_rev[0]:
1550 if r == next_rev[0]:
1551 # If the last rev we looked at was the one just previous,
1551 # If the last rev we looked at was the one just previous,
1552 # we only need to see a diff.
1552 # we only need to see a diff.
1553 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1553 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1554 # For each line in the delta
1554 # For each line in the delta
1555 for dline in delta.splitlines():
1555 for dline in delta.splitlines():
1556 # get the filename and filenode for that line
1556 # get the filename and filenode for that line
1557 f, fnode = dline.split('\0')
1557 f, fnode = dline.split('\0')
1558 fnode = bin(fnode[:40])
1558 fnode = bin(fnode[:40])
1559 f = changedfiles.get(f, None)
1559 f = changedfiles.get(f, None)
1560 # And if the file is in the list of files we care
1560 # And if the file is in the list of files we care
1561 # about.
1561 # about.
1562 if f is not None:
1562 if f is not None:
1563 # Get the changenode this manifest belongs to
1563 # Get the changenode this manifest belongs to
1564 clnode = msng_mnfst_set[mnfstnode]
1564 clnode = msng_mnfst_set[mnfstnode]
1565 # Create the set of filenodes for the file if
1565 # Create the set of filenodes for the file if
1566 # there isn't one already.
1566 # there isn't one already.
1567 ndset = msng_filenode_set.setdefault(f, {})
1567 ndset = msng_filenode_set.setdefault(f, {})
1568 # And set the filenode's changelog node to the
1568 # And set the filenode's changelog node to the
1569 # manifest's if it hasn't been set already.
1569 # manifest's if it hasn't been set already.
1570 ndset.setdefault(fnode, clnode)
1570 ndset.setdefault(fnode, clnode)
1571 else:
1571 else:
1572 # Otherwise we need a full manifest.
1572 # Otherwise we need a full manifest.
1573 m = mnfst.read(mnfstnode)
1573 m = mnfst.read(mnfstnode)
1574 # For every file in we care about.
1574 # For every file in we care about.
1575 for f in changedfiles:
1575 for f in changedfiles:
1576 fnode = m.get(f, None)
1576 fnode = m.get(f, None)
1577 # If it's in the manifest
1577 # If it's in the manifest
1578 if fnode is not None:
1578 if fnode is not None:
1579 # See comments above.
1579 # See comments above.
1580 clnode = msng_mnfst_set[mnfstnode]
1580 clnode = msng_mnfst_set[mnfstnode]
1581 ndset = msng_filenode_set.setdefault(f, {})
1581 ndset = msng_filenode_set.setdefault(f, {})
1582 ndset.setdefault(fnode, clnode)
1582 ndset.setdefault(fnode, clnode)
1583 # Remember the revision we hope to see next.
1583 # Remember the revision we hope to see next.
1584 next_rev[0] = r + 1
1584 next_rev[0] = r + 1
1585 return collect_msng_filenodes
1585 return collect_msng_filenodes
1586
1586
1587 # We have a list of filenodes we think we need for a file, lets remove
1587 # We have a list of filenodes we think we need for a file, lets remove
1588 # all those we now the recipient must have.
1588 # all those we now the recipient must have.
1589 def prune_filenodes(f, filerevlog):
1589 def prune_filenodes(f, filerevlog):
1590 msngset = msng_filenode_set[f]
1590 msngset = msng_filenode_set[f]
1591 hasset = {}
1591 hasset = {}
1592 # If a 'missing' filenode thinks it belongs to a changenode we
1592 # If a 'missing' filenode thinks it belongs to a changenode we
1593 # assume the recipient must have, then the recipient must have
1593 # assume the recipient must have, then the recipient must have
1594 # that filenode.
1594 # that filenode.
1595 for n in msngset:
1595 for n in msngset:
1596 clnode = cl.node(filerevlog.linkrev(n))
1596 clnode = cl.node(filerevlog.linkrev(n))
1597 if clnode in has_cl_set:
1597 if clnode in has_cl_set:
1598 hasset[n] = 1
1598 hasset[n] = 1
1599 prune_parents(filerevlog, hasset, msngset)
1599 prune_parents(filerevlog, hasset, msngset)
1600
1600
1601 # A function generator function that sets up the a context for the
1601 # A function generator function that sets up the a context for the
1602 # inner function.
1602 # inner function.
1603 def lookup_filenode_link_func(fname):
1603 def lookup_filenode_link_func(fname):
1604 msngset = msng_filenode_set[fname]
1604 msngset = msng_filenode_set[fname]
1605 # Lookup the changenode the filenode belongs to.
1605 # Lookup the changenode the filenode belongs to.
1606 def lookup_filenode_link(fnode):
1606 def lookup_filenode_link(fnode):
1607 return msngset[fnode]
1607 return msngset[fnode]
1608 return lookup_filenode_link
1608 return lookup_filenode_link
1609
1609
1610 # Now that we have all theses utility functions to help out and
1610 # Now that we have all theses utility functions to help out and
1611 # logically divide up the task, generate the group.
1611 # logically divide up the task, generate the group.
1612 def gengroup():
1612 def gengroup():
1613 # The set of changed files starts empty.
1613 # The set of changed files starts empty.
1614 changedfiles = {}
1614 changedfiles = {}
1615 # Create a changenode group generator that will call our functions
1615 # Create a changenode group generator that will call our functions
1616 # back to lookup the owning changenode and collect information.
1616 # back to lookup the owning changenode and collect information.
1617 group = cl.group(msng_cl_lst, identity,
1617 group = cl.group(msng_cl_lst, identity,
1618 manifest_and_file_collector(changedfiles))
1618 manifest_and_file_collector(changedfiles))
1619 for chnk in group:
1619 for chnk in group:
1620 yield chnk
1620 yield chnk
1621
1621
1622 # The list of manifests has been collected by the generator
1622 # The list of manifests has been collected by the generator
1623 # calling our functions back.
1623 # calling our functions back.
1624 prune_manifests()
1624 prune_manifests()
1625 msng_mnfst_lst = msng_mnfst_set.keys()
1625 msng_mnfst_lst = msng_mnfst_set.keys()
1626 # Sort the manifestnodes by revision number.
1626 # Sort the manifestnodes by revision number.
1627 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1627 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1628 # Create a generator for the manifestnodes that calls our lookup
1628 # Create a generator for the manifestnodes that calls our lookup
1629 # and data collection functions back.
1629 # and data collection functions back.
1630 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1630 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1631 filenode_collector(changedfiles))
1631 filenode_collector(changedfiles))
1632 for chnk in group:
1632 for chnk in group:
1633 yield chnk
1633 yield chnk
1634
1634
1635 # These are no longer needed, dereference and toss the memory for
1635 # These are no longer needed, dereference and toss the memory for
1636 # them.
1636 # them.
1637 msng_mnfst_lst = None
1637 msng_mnfst_lst = None
1638 msng_mnfst_set.clear()
1638 msng_mnfst_set.clear()
1639
1639
1640 changedfiles = changedfiles.keys()
1640 changedfiles = changedfiles.keys()
1641 changedfiles.sort()
1641 changedfiles.sort()
1642 # Go through all our files in order sorted by name.
1642 # Go through all our files in order sorted by name.
1643 for fname in changedfiles:
1643 for fname in changedfiles:
1644 filerevlog = self.file(fname)
1644 filerevlog = self.file(fname)
1645 # Toss out the filenodes that the recipient isn't really
1645 # Toss out the filenodes that the recipient isn't really
1646 # missing.
1646 # missing.
1647 if msng_filenode_set.has_key(fname):
1647 if msng_filenode_set.has_key(fname):
1648 prune_filenodes(fname, filerevlog)
1648 prune_filenodes(fname, filerevlog)
1649 msng_filenode_lst = msng_filenode_set[fname].keys()
1649 msng_filenode_lst = msng_filenode_set[fname].keys()
1650 else:
1650 else:
1651 msng_filenode_lst = []
1651 msng_filenode_lst = []
1652 # If any filenodes are left, generate the group for them,
1652 # If any filenodes are left, generate the group for them,
1653 # otherwise don't bother.
1653 # otherwise don't bother.
1654 if len(msng_filenode_lst) > 0:
1654 if len(msng_filenode_lst) > 0:
1655 yield changegroup.genchunk(fname)
1655 yield changegroup.genchunk(fname)
1656 # Sort the filenodes by their revision #
1656 # Sort the filenodes by their revision #
1657 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1657 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1658 # Create a group generator and only pass in a changenode
1658 # Create a group generator and only pass in a changenode
1659 # lookup function as we need to collect no information
1659 # lookup function as we need to collect no information
1660 # from filenodes.
1660 # from filenodes.
1661 group = filerevlog.group(msng_filenode_lst,
1661 group = filerevlog.group(msng_filenode_lst,
1662 lookup_filenode_link_func(fname))
1662 lookup_filenode_link_func(fname))
1663 for chnk in group:
1663 for chnk in group:
1664 yield chnk
1664 yield chnk
1665 if msng_filenode_set.has_key(fname):
1665 if msng_filenode_set.has_key(fname):
1666 # Don't need this anymore, toss it to free memory.
1666 # Don't need this anymore, toss it to free memory.
1667 del msng_filenode_set[fname]
1667 del msng_filenode_set[fname]
1668 # Signal that no more groups are left.
1668 # Signal that no more groups are left.
1669 yield changegroup.closechunk()
1669 yield changegroup.closechunk()
1670
1670
1671 if msng_cl_lst:
1671 if msng_cl_lst:
1672 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1672 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1673
1673
1674 return util.chunkbuffer(gengroup())
1674 return util.chunkbuffer(gengroup())
1675
1675
1676 def changegroup(self, basenodes, source):
1676 def changegroup(self, basenodes, source):
1677 """Generate a changegroup of all nodes that we have that a recipient
1677 """Generate a changegroup of all nodes that we have that a recipient
1678 doesn't.
1678 doesn't.
1679
1679
1680 This is much easier than the previous function as we can assume that
1680 This is much easier than the previous function as we can assume that
1681 the recipient has any changenode we aren't sending them."""
1681 the recipient has any changenode we aren't sending them."""
1682
1682
1683 self.hook('preoutgoing', throw=True, source=source)
1683 self.hook('preoutgoing', throw=True, source=source)
1684
1684
1685 cl = self.changelog
1685 cl = self.changelog
1686 nodes = cl.nodesbetween(basenodes, None)[0]
1686 nodes = cl.nodesbetween(basenodes, None)[0]
1687 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1687 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1688 self.changegroupinfo(nodes)
1688 self.changegroupinfo(nodes)
1689
1689
1690 def identity(x):
1690 def identity(x):
1691 return x
1691 return x
1692
1692
1693 def gennodelst(revlog):
1693 def gennodelst(revlog):
1694 for r in xrange(0, revlog.count()):
1694 for r in xrange(0, revlog.count()):
1695 n = revlog.node(r)
1695 n = revlog.node(r)
1696 if revlog.linkrev(n) in revset:
1696 if revlog.linkrev(n) in revset:
1697 yield n
1697 yield n
1698
1698
1699 def changed_file_collector(changedfileset):
1699 def changed_file_collector(changedfileset):
1700 def collect_changed_files(clnode):
1700 def collect_changed_files(clnode):
1701 c = cl.read(clnode)
1701 c = cl.read(clnode)
1702 for fname in c[3]:
1702 for fname in c[3]:
1703 changedfileset[fname] = 1
1703 changedfileset[fname] = 1
1704 return collect_changed_files
1704 return collect_changed_files
1705
1705
1706 def lookuprevlink_func(revlog):
1706 def lookuprevlink_func(revlog):
1707 def lookuprevlink(n):
1707 def lookuprevlink(n):
1708 return cl.node(revlog.linkrev(n))
1708 return cl.node(revlog.linkrev(n))
1709 return lookuprevlink
1709 return lookuprevlink
1710
1710
1711 def gengroup():
1711 def gengroup():
1712 # construct a list of all changed files
1712 # construct a list of all changed files
1713 changedfiles = {}
1713 changedfiles = {}
1714
1714
1715 for chnk in cl.group(nodes, identity,
1715 for chnk in cl.group(nodes, identity,
1716 changed_file_collector(changedfiles)):
1716 changed_file_collector(changedfiles)):
1717 yield chnk
1717 yield chnk
1718 changedfiles = changedfiles.keys()
1718 changedfiles = changedfiles.keys()
1719 changedfiles.sort()
1719 changedfiles.sort()
1720
1720
1721 mnfst = self.manifest
1721 mnfst = self.manifest
1722 nodeiter = gennodelst(mnfst)
1722 nodeiter = gennodelst(mnfst)
1723 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1723 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1724 yield chnk
1724 yield chnk
1725
1725
1726 for fname in changedfiles:
1726 for fname in changedfiles:
1727 filerevlog = self.file(fname)
1727 filerevlog = self.file(fname)
1728 nodeiter = gennodelst(filerevlog)
1728 nodeiter = gennodelst(filerevlog)
1729 nodeiter = list(nodeiter)
1729 nodeiter = list(nodeiter)
1730 if nodeiter:
1730 if nodeiter:
1731 yield changegroup.genchunk(fname)
1731 yield changegroup.genchunk(fname)
1732 lookup = lookuprevlink_func(filerevlog)
1732 lookup = lookuprevlink_func(filerevlog)
1733 for chnk in filerevlog.group(nodeiter, lookup):
1733 for chnk in filerevlog.group(nodeiter, lookup):
1734 yield chnk
1734 yield chnk
1735
1735
1736 yield changegroup.closechunk()
1736 yield changegroup.closechunk()
1737
1737
1738 if nodes:
1738 if nodes:
1739 self.hook('outgoing', node=hex(nodes[0]), source=source)
1739 self.hook('outgoing', node=hex(nodes[0]), source=source)
1740
1740
1741 return util.chunkbuffer(gengroup())
1741 return util.chunkbuffer(gengroup())
1742
1742
1743 def addchangegroup(self, source, srctype, url):
1743 def addchangegroup(self, source, srctype, url):
1744 """add changegroup to repo.
1744 """add changegroup to repo.
1745 returns number of heads modified or added + 1."""
1746
1745
1746 return values:
1747 - nothing changed or no source: 0
1748 - more heads than before: 1+added heads (2..n)
1749 - less heads than before: -1-removed heads (-2..-n)
1750 - number of heads stays the same: 1
1751 """
1747 def csmap(x):
1752 def csmap(x):
1748 self.ui.debug(_("add changeset %s\n") % short(x))
1753 self.ui.debug(_("add changeset %s\n") % short(x))
1749 return cl.count()
1754 return cl.count()
1750
1755
1751 def revmap(x):
1756 def revmap(x):
1752 return cl.rev(x)
1757 return cl.rev(x)
1753
1758
1754 if not source:
1759 if not source:
1755 return 0
1760 return 0
1756
1761
1757 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1762 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1758
1763
1759 changesets = files = revisions = 0
1764 changesets = files = revisions = 0
1760
1765
1761 tr = self.transaction()
1766 tr = self.transaction()
1762
1767
1763 # write changelog data to temp files so concurrent readers will not see
1768 # write changelog data to temp files so concurrent readers will not see
1764 # inconsistent view
1769 # inconsistent view
1765 cl = None
1770 cl = None
1766 try:
1771 try:
1767 cl = appendfile.appendchangelog(self.sopener,
1772 cl = appendfile.appendchangelog(self.sopener,
1768 self.changelog.version)
1773 self.changelog.version)
1769
1774
1770 oldheads = len(cl.heads())
1775 oldheads = len(cl.heads())
1771
1776
1772 # pull off the changeset group
1777 # pull off the changeset group
1773 self.ui.status(_("adding changesets\n"))
1778 self.ui.status(_("adding changesets\n"))
1774 cor = cl.count() - 1
1779 cor = cl.count() - 1
1775 chunkiter = changegroup.chunkiter(source)
1780 chunkiter = changegroup.chunkiter(source)
1776 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1781 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1777 raise util.Abort(_("received changelog group is empty"))
1782 raise util.Abort(_("received changelog group is empty"))
1778 cnr = cl.count() - 1
1783 cnr = cl.count() - 1
1779 changesets = cnr - cor
1784 changesets = cnr - cor
1780
1785
1781 # pull off the manifest group
1786 # pull off the manifest group
1782 self.ui.status(_("adding manifests\n"))
1787 self.ui.status(_("adding manifests\n"))
1783 chunkiter = changegroup.chunkiter(source)
1788 chunkiter = changegroup.chunkiter(source)
1784 # no need to check for empty manifest group here:
1789 # no need to check for empty manifest group here:
1785 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1790 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1786 # no new manifest will be created and the manifest group will
1791 # no new manifest will be created and the manifest group will
1787 # be empty during the pull
1792 # be empty during the pull
1788 self.manifest.addgroup(chunkiter, revmap, tr)
1793 self.manifest.addgroup(chunkiter, revmap, tr)
1789
1794
1790 # process the files
1795 # process the files
1791 self.ui.status(_("adding file changes\n"))
1796 self.ui.status(_("adding file changes\n"))
1792 while 1:
1797 while 1:
1793 f = changegroup.getchunk(source)
1798 f = changegroup.getchunk(source)
1794 if not f:
1799 if not f:
1795 break
1800 break
1796 self.ui.debug(_("adding %s revisions\n") % f)
1801 self.ui.debug(_("adding %s revisions\n") % f)
1797 fl = self.file(f)
1802 fl = self.file(f)
1798 o = fl.count()
1803 o = fl.count()
1799 chunkiter = changegroup.chunkiter(source)
1804 chunkiter = changegroup.chunkiter(source)
1800 if fl.addgroup(chunkiter, revmap, tr) is None:
1805 if fl.addgroup(chunkiter, revmap, tr) is None:
1801 raise util.Abort(_("received file revlog group is empty"))
1806 raise util.Abort(_("received file revlog group is empty"))
1802 revisions += fl.count() - o
1807 revisions += fl.count() - o
1803 files += 1
1808 files += 1
1804
1809
1805 cl.writedata()
1810 cl.writedata()
1806 finally:
1811 finally:
1807 if cl:
1812 if cl:
1808 cl.cleanup()
1813 cl.cleanup()
1809
1814
1810 # make changelog see real files again
1815 # make changelog see real files again
1811 self.changelog = changelog.changelog(self.sopener,
1816 self.changelog = changelog.changelog(self.sopener,
1812 self.changelog.version)
1817 self.changelog.version)
1813 self.changelog.checkinlinesize(tr)
1818 self.changelog.checkinlinesize(tr)
1814
1819
1815 newheads = len(self.changelog.heads())
1820 newheads = len(self.changelog.heads())
1816 heads = ""
1821 heads = ""
1817 if oldheads and newheads != oldheads:
1822 if oldheads and newheads != oldheads:
1818 heads = _(" (%+d heads)") % (newheads - oldheads)
1823 heads = _(" (%+d heads)") % (newheads - oldheads)
1819
1824
1820 self.ui.status(_("added %d changesets"
1825 self.ui.status(_("added %d changesets"
1821 " with %d changes to %d files%s\n")
1826 " with %d changes to %d files%s\n")
1822 % (changesets, revisions, files, heads))
1827 % (changesets, revisions, files, heads))
1823
1828
1824 if changesets > 0:
1829 if changesets > 0:
1825 self.hook('pretxnchangegroup', throw=True,
1830 self.hook('pretxnchangegroup', throw=True,
1826 node=hex(self.changelog.node(cor+1)), source=srctype,
1831 node=hex(self.changelog.node(cor+1)), source=srctype,
1827 url=url)
1832 url=url)
1828
1833
1829 tr.close()
1834 tr.close()
1830
1835
1831 if changesets > 0:
1836 if changesets > 0:
1832 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1837 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1833 source=srctype, url=url)
1838 source=srctype, url=url)
1834
1839
1835 for i in xrange(cor + 1, cnr + 1):
1840 for i in xrange(cor + 1, cnr + 1):
1836 self.hook("incoming", node=hex(self.changelog.node(i)),
1841 self.hook("incoming", node=hex(self.changelog.node(i)),
1837 source=srctype, url=url)
1842 source=srctype, url=url)
1838
1843
1839 return newheads - oldheads + 1
1844 # never return 0 here:
1845 if newheads < oldheads:
1846 return newheads - oldheads - 1
1847 else:
1848 return newheads - oldheads + 1
1840
1849
1841
1850
1842 def stream_in(self, remote):
1851 def stream_in(self, remote):
1843 fp = remote.stream_out()
1852 fp = remote.stream_out()
1844 l = fp.readline()
1853 l = fp.readline()
1845 try:
1854 try:
1846 resp = int(l)
1855 resp = int(l)
1847 except ValueError:
1856 except ValueError:
1848 raise util.UnexpectedOutput(
1857 raise util.UnexpectedOutput(
1849 _('Unexpected response from remote server:'), l)
1858 _('Unexpected response from remote server:'), l)
1850 if resp == 1:
1859 if resp == 1:
1851 raise util.Abort(_('operation forbidden by server'))
1860 raise util.Abort(_('operation forbidden by server'))
1852 elif resp == 2:
1861 elif resp == 2:
1853 raise util.Abort(_('locking the remote repository failed'))
1862 raise util.Abort(_('locking the remote repository failed'))
1854 elif resp != 0:
1863 elif resp != 0:
1855 raise util.Abort(_('the server sent an unknown error code'))
1864 raise util.Abort(_('the server sent an unknown error code'))
1856 self.ui.status(_('streaming all changes\n'))
1865 self.ui.status(_('streaming all changes\n'))
1857 l = fp.readline()
1866 l = fp.readline()
1858 try:
1867 try:
1859 total_files, total_bytes = map(int, l.split(' ', 1))
1868 total_files, total_bytes = map(int, l.split(' ', 1))
1860 except ValueError, TypeError:
1869 except ValueError, TypeError:
1861 raise util.UnexpectedOutput(
1870 raise util.UnexpectedOutput(
1862 _('Unexpected response from remote server:'), l)
1871 _('Unexpected response from remote server:'), l)
1863 self.ui.status(_('%d files to transfer, %s of data\n') %
1872 self.ui.status(_('%d files to transfer, %s of data\n') %
1864 (total_files, util.bytecount(total_bytes)))
1873 (total_files, util.bytecount(total_bytes)))
1865 start = time.time()
1874 start = time.time()
1866 for i in xrange(total_files):
1875 for i in xrange(total_files):
1867 # XXX doesn't support '\n' or '\r' in filenames
1876 # XXX doesn't support '\n' or '\r' in filenames
1868 l = fp.readline()
1877 l = fp.readline()
1869 try:
1878 try:
1870 name, size = l.split('\0', 1)
1879 name, size = l.split('\0', 1)
1871 size = int(size)
1880 size = int(size)
1872 except ValueError, TypeError:
1881 except ValueError, TypeError:
1873 raise util.UnexpectedOutput(
1882 raise util.UnexpectedOutput(
1874 _('Unexpected response from remote server:'), l)
1883 _('Unexpected response from remote server:'), l)
1875 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1884 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1876 ofp = self.sopener(name, 'w')
1885 ofp = self.sopener(name, 'w')
1877 for chunk in util.filechunkiter(fp, limit=size):
1886 for chunk in util.filechunkiter(fp, limit=size):
1878 ofp.write(chunk)
1887 ofp.write(chunk)
1879 ofp.close()
1888 ofp.close()
1880 elapsed = time.time() - start
1889 elapsed = time.time() - start
1881 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1890 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1882 (util.bytecount(total_bytes), elapsed,
1891 (util.bytecount(total_bytes), elapsed,
1883 util.bytecount(total_bytes / elapsed)))
1892 util.bytecount(total_bytes / elapsed)))
1884 self.reload()
1893 self.reload()
1885 return len(self.heads()) + 1
1894 return len(self.heads()) + 1
1886
1895
1887 def clone(self, remote, heads=[], stream=False):
1896 def clone(self, remote, heads=[], stream=False):
1888 '''clone remote repository.
1897 '''clone remote repository.
1889
1898
1890 keyword arguments:
1899 keyword arguments:
1891 heads: list of revs to clone (forces use of pull)
1900 heads: list of revs to clone (forces use of pull)
1892 stream: use streaming clone if possible'''
1901 stream: use streaming clone if possible'''
1893
1902
1894 # now, all clients that can request uncompressed clones can
1903 # now, all clients that can request uncompressed clones can
1895 # read repo formats supported by all servers that can serve
1904 # read repo formats supported by all servers that can serve
1896 # them.
1905 # them.
1897
1906
1898 # if revlog format changes, client will have to check version
1907 # if revlog format changes, client will have to check version
1899 # and format flags on "stream" capability, and use
1908 # and format flags on "stream" capability, and use
1900 # uncompressed only if compatible.
1909 # uncompressed only if compatible.
1901
1910
1902 if stream and not heads and remote.capable('stream'):
1911 if stream and not heads and remote.capable('stream'):
1903 return self.stream_in(remote)
1912 return self.stream_in(remote)
1904 return self.pull(remote, heads)
1913 return self.pull(remote, heads)
1905
1914
1906 # used to avoid circular references so destructors work
1915 # used to avoid circular references so destructors work
1907 def aftertrans(files):
1916 def aftertrans(files):
1908 renamefiles = [tuple(t) for t in files]
1917 renamefiles = [tuple(t) for t in files]
1909 def a():
1918 def a():
1910 for src, dest in renamefiles:
1919 for src, dest in renamefiles:
1911 util.rename(src, dest)
1920 util.rename(src, dest)
1912 return a
1921 return a
1913
1922
1914 def instance(ui, path, create):
1923 def instance(ui, path, create):
1915 return localrepository(ui, util.drop_scheme('file', path), create)
1924 return localrepository(ui, util.drop_scheme('file', path), create)
1916
1925
1917 def islocal(path):
1926 def islocal(path):
1918 return True
1927 return True
@@ -1,56 +1,57
1 #!/bin/sh
1 #!/bin/sh
2
2
3 mkdir a
3 mkdir a
4 cd a
4 cd a
5 hg init
5 hg init
6 echo foo > t1
6 echo foo > t1
7 hg add t1
7 hg add t1
8 hg commit -m "1" -d "1000000 0"
8 hg commit -m "1" -d "1000000 0"
9
9
10 cd ..
10 cd ..
11 hg clone a b
11 hg clone a b
12
12
13 cd a
13 cd a
14 echo foo > t2
14 echo foo > t2
15 hg add t2
15 hg add t2
16 hg commit -m "2" -d "1000000 0"
16 hg commit -m "2" -d "1000000 0"
17
17
18 cd ../b
18 cd ../b
19 echo foo > t3
19 echo foo > t3
20 hg add t3
20 hg add t3
21 hg commit -m "3" -d "1000000 0"
21 hg commit -m "3" -d "1000000 0"
22
22
23 hg push ../a
23 hg push ../a
24 hg pull ../a
24 hg pull ../a
25 hg push ../a
25 hg push ../a
26 hg merge
26 hg merge
27 hg commit -m "4" -d "1000000 0"
27 hg commit -m "4" -d "1000000 0"
28 hg push ../a
28 hg push ../a
29 cd ..
29 cd ..
30
30
31 hg init c
31 hg init c
32 cd c
32 cd c
33 for i in 0 1 2; do
33 for i in 0 1 2; do
34 echo $i >> foo
34 echo $i >> foo
35 hg ci -Am $i -d "1000000 0"
35 hg ci -Am $i -d "1000000 0"
36 done
36 done
37 cd ..
37 cd ..
38
38
39 hg clone c d
39 hg clone c d
40 cd d
40 cd d
41 for i in 0 1; do
41 for i in 0 1; do
42 hg co -C $i
42 hg co -C $i
43 echo d-$i >> foo
43 echo d-$i >> foo
44 hg ci -m d-$i -d "1000000 0"
44 hg ci -m d-$i -d "1000000 0"
45 done
45 done
46
46
47 HGMERGE=true hg merge 3
47 HGMERGE=true hg merge 3
48 hg ci -m c-d -d "1000000 0"
48 hg ci -m c-d -d "1000000 0"
49
49
50 hg push ../c
50 hg push ../c; echo $?
51 hg push -r 2 ../c
51 hg push -r 2 ../c; echo $?
52 hg push -r 3 -r 4 ../c
52 hg push -r 3 ../c; echo $?
53 hg push -f -r 3 -r 4 ../c
53 hg push -r 3 -r 4 ../c; echo $?
54 hg push -r 5 ../c
54 hg push -f -r 3 -r 4 ../c; echo $?
55 hg push -r 5 ../c; echo $?
55
56
56 exit 0
57 exit 0
@@ -1,54 +1,64
1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 pushing to ../a
2 pushing to ../a
3 searching for changes
3 searching for changes
4 abort: push creates new remote branches!
4 abort: push creates new remote branches!
5 (did you forget to merge? use push -f to force)
5 (did you forget to merge? use push -f to force)
6 pulling from ../a
6 pulling from ../a
7 searching for changes
7 searching for changes
8 adding changesets
8 adding changesets
9 adding manifests
9 adding manifests
10 adding file changes
10 adding file changes
11 added 1 changesets with 1 changes to 1 files (+1 heads)
11 added 1 changesets with 1 changes to 1 files (+1 heads)
12 (run 'hg heads' to see heads, 'hg merge' to merge)
12 (run 'hg heads' to see heads, 'hg merge' to merge)
13 pushing to ../a
13 pushing to ../a
14 searching for changes
14 searching for changes
15 abort: push creates new remote branches!
15 abort: push creates new remote branches!
16 (did you forget to merge? use push -f to force)
16 (did you forget to merge? use push -f to force)
17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
17 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
18 (branch merge, don't forget to commit)
18 (branch merge, don't forget to commit)
19 pushing to ../a
19 pushing to ../a
20 searching for changes
20 searching for changes
21 adding changesets
21 adding changesets
22 adding manifests
22 adding manifests
23 adding file changes
23 adding file changes
24 added 2 changesets with 1 changes to 1 files
24 added 2 changesets with 1 changes to 1 files
25 adding foo
25 adding foo
26 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
26 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
29 merging foo
29 merging foo
30 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
30 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
31 (branch merge, don't forget to commit)
31 (branch merge, don't forget to commit)
32 pushing to ../c
32 pushing to ../c
33 searching for changes
33 searching for changes
34 abort: push creates new remote branches!
34 abort: push creates new remote branches!
35 (did you forget to merge? use push -f to force)
35 (did you forget to merge? use push -f to force)
36 0
36 pushing to ../c
37 pushing to ../c
37 searching for changes
38 searching for changes
38 no changes found
39 no changes found
40 0
39 pushing to ../c
41 pushing to ../c
40 searching for changes
42 searching for changes
41 abort: push creates new remote branches!
43 abort: push creates new remote branches!
42 (did you forget to merge? use push -f to force)
44 (did you forget to merge? use push -f to force)
45 0
46 pushing to ../c
47 searching for changes
48 abort: push creates new remote branches!
49 (did you forget to merge? use push -f to force)
50 0
43 pushing to ../c
51 pushing to ../c
44 searching for changes
52 searching for changes
45 adding changesets
53 adding changesets
46 adding manifests
54 adding manifests
47 adding file changes
55 adding file changes
48 added 2 changesets with 2 changes to 1 files (+2 heads)
56 added 2 changesets with 2 changes to 1 files (+2 heads)
57 0
49 pushing to ../c
58 pushing to ../c
50 searching for changes
59 searching for changes
51 adding changesets
60 adding changesets
52 adding manifests
61 adding manifests
53 adding file changes
62 adding file changes
54 added 1 changesets with 1 changes to 1 files (-1 heads)
63 added 1 changesets with 1 changes to 1 files (-1 heads)
64 0
General Comments 0
You need to be logged in to leave comments. Login now