##// END OF EJS Templates
localrepo: change aftertrans to be independent of the store path
Benoit Boissinot -
r3790:f183c185 default
parent child Browse files
Show More
@@ -1,1912 +1,1913 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 else:
40 else:
41 raise repo.RepoError(_("repository %s not found") % path)
41 raise repo.RepoError(_("repository %s not found") % path)
42 elif create:
42 elif create:
43 raise repo.RepoError(_("repository %s already exists") % path)
43 raise repo.RepoError(_("repository %s already exists") % path)
44
44
45 self.root = os.path.realpath(path)
45 self.root = os.path.realpath(path)
46 self.origroot = path
46 self.origroot = path
47 self.ui = ui.ui(parentui=parentui)
47 self.ui = ui.ui(parentui=parentui)
48 self.opener = util.opener(self.path)
48 self.opener = util.opener(self.path)
49 self.sopener = util.opener(self.path)
49 self.sopener = util.opener(self.path)
50 self.wopener = util.opener(self.root)
50 self.wopener = util.opener(self.root)
51
51
52 try:
52 try:
53 self.ui.readconfig(self.join("hgrc"), self.root)
53 self.ui.readconfig(self.join("hgrc"), self.root)
54 except IOError:
54 except IOError:
55 pass
55 pass
56
56
57 v = self.ui.configrevlog()
57 v = self.ui.configrevlog()
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 fl = v.get('flags', None)
60 fl = v.get('flags', None)
61 flags = 0
61 flags = 0
62 if fl != None:
62 if fl != None:
63 for x in fl.split():
63 for x in fl.split():
64 flags |= revlog.flagstr(x)
64 flags |= revlog.flagstr(x)
65 elif self.revlogv1:
65 elif self.revlogv1:
66 flags = revlog.REVLOG_DEFAULT_FLAGS
66 flags = revlog.REVLOG_DEFAULT_FLAGS
67
67
68 v = self.revlogversion | flags
68 v = self.revlogversion | flags
69 self.manifest = manifest.manifest(self.sopener, v)
69 self.manifest = manifest.manifest(self.sopener, v)
70 self.changelog = changelog.changelog(self.sopener, v)
70 self.changelog = changelog.changelog(self.sopener, v)
71
71
72 # the changelog might not have the inline index flag
72 # the changelog might not have the inline index flag
73 # on. If the format of the changelog is the same as found in
73 # on. If the format of the changelog is the same as found in
74 # .hgrc, apply any flags found in the .hgrc as well.
74 # .hgrc, apply any flags found in the .hgrc as well.
75 # Otherwise, just version from the changelog
75 # Otherwise, just version from the changelog
76 v = self.changelog.version
76 v = self.changelog.version
77 if v == self.revlogversion:
77 if v == self.revlogversion:
78 v |= flags
78 v |= flags
79 self.revlogversion = v
79 self.revlogversion = v
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self.branchcache = None
82 self.branchcache = None
83 self.nodetagscache = None
83 self.nodetagscache = None
84 self.encodepats = None
84 self.encodepats = None
85 self.decodepats = None
85 self.decodepats = None
86 self.transhandle = None
86 self.transhandle = None
87
87
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89
89
90 def url(self):
90 def url(self):
91 return 'file:' + self.root
91 return 'file:' + self.root
92
92
93 def hook(self, name, throw=False, **args):
93 def hook(self, name, throw=False, **args):
94 def callhook(hname, funcname):
94 def callhook(hname, funcname):
95 '''call python hook. hook is callable object, looked up as
95 '''call python hook. hook is callable object, looked up as
96 name in python module. if callable returns "true", hook
96 name in python module. if callable returns "true", hook
97 fails, else passes. if hook raises exception, treated as
97 fails, else passes. if hook raises exception, treated as
98 hook failure. exception propagates if throw is "true".
98 hook failure. exception propagates if throw is "true".
99
99
100 reason for "true" meaning "hook failed" is so that
100 reason for "true" meaning "hook failed" is so that
101 unmodified commands (e.g. mercurial.commands.update) can
101 unmodified commands (e.g. mercurial.commands.update) can
102 be run as hooks without wrappers to convert return values.'''
102 be run as hooks without wrappers to convert return values.'''
103
103
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 d = funcname.rfind('.')
105 d = funcname.rfind('.')
106 if d == -1:
106 if d == -1:
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 % (hname, funcname))
108 % (hname, funcname))
109 modname = funcname[:d]
109 modname = funcname[:d]
110 try:
110 try:
111 obj = __import__(modname)
111 obj = __import__(modname)
112 except ImportError:
112 except ImportError:
113 try:
113 try:
114 # extensions are loaded with hgext_ prefix
114 # extensions are loaded with hgext_ prefix
115 obj = __import__("hgext_%s" % modname)
115 obj = __import__("hgext_%s" % modname)
116 except ImportError:
116 except ImportError:
117 raise util.Abort(_('%s hook is invalid '
117 raise util.Abort(_('%s hook is invalid '
118 '(import of "%s" failed)') %
118 '(import of "%s" failed)') %
119 (hname, modname))
119 (hname, modname))
120 try:
120 try:
121 for p in funcname.split('.')[1:]:
121 for p in funcname.split('.')[1:]:
122 obj = getattr(obj, p)
122 obj = getattr(obj, p)
123 except AttributeError, err:
123 except AttributeError, err:
124 raise util.Abort(_('%s hook is invalid '
124 raise util.Abort(_('%s hook is invalid '
125 '("%s" is not defined)') %
125 '("%s" is not defined)') %
126 (hname, funcname))
126 (hname, funcname))
127 if not callable(obj):
127 if not callable(obj):
128 raise util.Abort(_('%s hook is invalid '
128 raise util.Abort(_('%s hook is invalid '
129 '("%s" is not callable)') %
129 '("%s" is not callable)') %
130 (hname, funcname))
130 (hname, funcname))
131 try:
131 try:
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 except (KeyboardInterrupt, util.SignalInterrupt):
133 except (KeyboardInterrupt, util.SignalInterrupt):
134 raise
134 raise
135 except Exception, exc:
135 except Exception, exc:
136 if isinstance(exc, util.Abort):
136 if isinstance(exc, util.Abort):
137 self.ui.warn(_('error: %s hook failed: %s\n') %
137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 (hname, exc.args[0]))
138 (hname, exc.args[0]))
139 else:
139 else:
140 self.ui.warn(_('error: %s hook raised an exception: '
140 self.ui.warn(_('error: %s hook raised an exception: '
141 '%s\n') % (hname, exc))
141 '%s\n') % (hname, exc))
142 if throw:
142 if throw:
143 raise
143 raise
144 self.ui.print_exc()
144 self.ui.print_exc()
145 return True
145 return True
146 if r:
146 if r:
147 if throw:
147 if throw:
148 raise util.Abort(_('%s hook failed') % hname)
148 raise util.Abort(_('%s hook failed') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 return r
150 return r
151
151
152 def runhook(name, cmd):
152 def runhook(name, cmd):
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 r = util.system(cmd, environ=env, cwd=self.root)
155 r = util.system(cmd, environ=env, cwd=self.root)
156 if r:
156 if r:
157 desc, r = util.explain_exit(r)
157 desc, r = util.explain_exit(r)
158 if throw:
158 if throw:
159 raise util.Abort(_('%s hook %s') % (name, desc))
159 raise util.Abort(_('%s hook %s') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 return r
161 return r
162
162
163 r = False
163 r = False
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 if hname.split(".", 1)[0] == name and cmd]
165 if hname.split(".", 1)[0] == name and cmd]
166 hooks.sort()
166 hooks.sort()
167 for hname, cmd in hooks:
167 for hname, cmd in hooks:
168 if cmd.startswith('python:'):
168 if cmd.startswith('python:'):
169 r = callhook(hname, cmd[7:].strip()) or r
169 r = callhook(hname, cmd[7:].strip()) or r
170 else:
170 else:
171 r = runhook(hname, cmd) or r
171 r = runhook(hname, cmd) or r
172 return r
172 return r
173
173
174 tag_disallowed = ':\r\n'
174 tag_disallowed = ':\r\n'
175
175
176 def tag(self, name, node, message, local, user, date):
176 def tag(self, name, node, message, local, user, date):
177 '''tag a revision with a symbolic name.
177 '''tag a revision with a symbolic name.
178
178
179 if local is True, the tag is stored in a per-repository file.
179 if local is True, the tag is stored in a per-repository file.
180 otherwise, it is stored in the .hgtags file, and a new
180 otherwise, it is stored in the .hgtags file, and a new
181 changeset is committed with the change.
181 changeset is committed with the change.
182
182
183 keyword arguments:
183 keyword arguments:
184
184
185 local: whether to store tag in non-version-controlled file
185 local: whether to store tag in non-version-controlled file
186 (default False)
186 (default False)
187
187
188 message: commit message to use if committing
188 message: commit message to use if committing
189
189
190 user: name of user to use if committing
190 user: name of user to use if committing
191
191
192 date: date tuple to use if committing'''
192 date: date tuple to use if committing'''
193
193
194 for c in self.tag_disallowed:
194 for c in self.tag_disallowed:
195 if c in name:
195 if c in name:
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197
197
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199
199
200 if local:
200 if local:
201 # local tags are stored in the current charset
201 # local tags are stored in the current charset
202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 self.hook('tag', node=hex(node), tag=name, local=local)
203 self.hook('tag', node=hex(node), tag=name, local=local)
204 return
204 return
205
205
206 for x in self.status()[:5]:
206 for x in self.status()[:5]:
207 if '.hgtags' in x:
207 if '.hgtags' in x:
208 raise util.Abort(_('working copy of .hgtags is changed '
208 raise util.Abort(_('working copy of .hgtags is changed '
209 '(please commit .hgtags manually)'))
209 '(please commit .hgtags manually)'))
210
210
211 # committed tags are stored in UTF-8
211 # committed tags are stored in UTF-8
212 line = '%s %s\n' % (hex(node), util.fromlocal(name))
212 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 self.wfile('.hgtags', 'ab').write(line)
213 self.wfile('.hgtags', 'ab').write(line)
214 if self.dirstate.state('.hgtags') == '?':
214 if self.dirstate.state('.hgtags') == '?':
215 self.add(['.hgtags'])
215 self.add(['.hgtags'])
216
216
217 self.commit(['.hgtags'], message, user, date)
217 self.commit(['.hgtags'], message, user, date)
218 self.hook('tag', node=hex(node), tag=name, local=local)
218 self.hook('tag', node=hex(node), tag=name, local=local)
219
219
220 def tags(self):
220 def tags(self):
221 '''return a mapping of tag to node'''
221 '''return a mapping of tag to node'''
222 if not self.tagscache:
222 if not self.tagscache:
223 self.tagscache = {}
223 self.tagscache = {}
224
224
225 def parsetag(line, context):
225 def parsetag(line, context):
226 if not line:
226 if not line:
227 return
227 return
228 s = l.split(" ", 1)
228 s = l.split(" ", 1)
229 if len(s) != 2:
229 if len(s) != 2:
230 self.ui.warn(_("%s: cannot parse entry\n") % context)
230 self.ui.warn(_("%s: cannot parse entry\n") % context)
231 return
231 return
232 node, key = s
232 node, key = s
233 key = util.tolocal(key.strip()) # stored in UTF-8
233 key = util.tolocal(key.strip()) # stored in UTF-8
234 try:
234 try:
235 bin_n = bin(node)
235 bin_n = bin(node)
236 except TypeError:
236 except TypeError:
237 self.ui.warn(_("%s: node '%s' is not well formed\n") %
237 self.ui.warn(_("%s: node '%s' is not well formed\n") %
238 (context, node))
238 (context, node))
239 return
239 return
240 if bin_n not in self.changelog.nodemap:
240 if bin_n not in self.changelog.nodemap:
241 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
241 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
242 (context, key))
242 (context, key))
243 return
243 return
244 self.tagscache[key] = bin_n
244 self.tagscache[key] = bin_n
245
245
246 # read the tags file from each head, ending with the tip,
246 # read the tags file from each head, ending with the tip,
247 # and add each tag found to the map, with "newer" ones
247 # and add each tag found to the map, with "newer" ones
248 # taking precedence
248 # taking precedence
249 f = None
249 f = None
250 for rev, node, fnode in self._hgtagsnodes():
250 for rev, node, fnode in self._hgtagsnodes():
251 f = (f and f.filectx(fnode) or
251 f = (f and f.filectx(fnode) or
252 self.filectx('.hgtags', fileid=fnode))
252 self.filectx('.hgtags', fileid=fnode))
253 count = 0
253 count = 0
254 for l in f.data().splitlines():
254 for l in f.data().splitlines():
255 count += 1
255 count += 1
256 parsetag(l, _("%s, line %d") % (str(f), count))
256 parsetag(l, _("%s, line %d") % (str(f), count))
257
257
258 try:
258 try:
259 f = self.opener("localtags")
259 f = self.opener("localtags")
260 count = 0
260 count = 0
261 for l in f:
261 for l in f:
262 # localtags are stored in the local character set
262 # localtags are stored in the local character set
263 # while the internal tag table is stored in UTF-8
263 # while the internal tag table is stored in UTF-8
264 l = util.fromlocal(l)
264 l = util.fromlocal(l)
265 count += 1
265 count += 1
266 parsetag(l, _("localtags, line %d") % count)
266 parsetag(l, _("localtags, line %d") % count)
267 except IOError:
267 except IOError:
268 pass
268 pass
269
269
270 self.tagscache['tip'] = self.changelog.tip()
270 self.tagscache['tip'] = self.changelog.tip()
271
271
272 return self.tagscache
272 return self.tagscache
273
273
274 def _hgtagsnodes(self):
274 def _hgtagsnodes(self):
275 heads = self.heads()
275 heads = self.heads()
276 heads.reverse()
276 heads.reverse()
277 last = {}
277 last = {}
278 ret = []
278 ret = []
279 for node in heads:
279 for node in heads:
280 c = self.changectx(node)
280 c = self.changectx(node)
281 rev = c.rev()
281 rev = c.rev()
282 try:
282 try:
283 fnode = c.filenode('.hgtags')
283 fnode = c.filenode('.hgtags')
284 except repo.LookupError:
284 except repo.LookupError:
285 continue
285 continue
286 ret.append((rev, node, fnode))
286 ret.append((rev, node, fnode))
287 if fnode in last:
287 if fnode in last:
288 ret[last[fnode]] = None
288 ret[last[fnode]] = None
289 last[fnode] = len(ret) - 1
289 last[fnode] = len(ret) - 1
290 return [item for item in ret if item]
290 return [item for item in ret if item]
291
291
292 def tagslist(self):
292 def tagslist(self):
293 '''return a list of tags ordered by revision'''
293 '''return a list of tags ordered by revision'''
294 l = []
294 l = []
295 for t, n in self.tags().items():
295 for t, n in self.tags().items():
296 try:
296 try:
297 r = self.changelog.rev(n)
297 r = self.changelog.rev(n)
298 except:
298 except:
299 r = -2 # sort to the beginning of the list if unknown
299 r = -2 # sort to the beginning of the list if unknown
300 l.append((r, t, n))
300 l.append((r, t, n))
301 l.sort()
301 l.sort()
302 return [(t, n) for r, t, n in l]
302 return [(t, n) for r, t, n in l]
303
303
304 def nodetags(self, node):
304 def nodetags(self, node):
305 '''return the tags associated with a node'''
305 '''return the tags associated with a node'''
306 if not self.nodetagscache:
306 if not self.nodetagscache:
307 self.nodetagscache = {}
307 self.nodetagscache = {}
308 for t, n in self.tags().items():
308 for t, n in self.tags().items():
309 self.nodetagscache.setdefault(n, []).append(t)
309 self.nodetagscache.setdefault(n, []).append(t)
310 return self.nodetagscache.get(node, [])
310 return self.nodetagscache.get(node, [])
311
311
312 def branchtags(self):
312 def branchtags(self):
313 if self.branchcache != None:
313 if self.branchcache != None:
314 return self.branchcache
314 return self.branchcache
315
315
316 self.branchcache = {} # avoid recursion in changectx
316 self.branchcache = {} # avoid recursion in changectx
317
317
318 partial, last, lrev = self._readbranchcache()
318 partial, last, lrev = self._readbranchcache()
319
319
320 tiprev = self.changelog.count() - 1
320 tiprev = self.changelog.count() - 1
321 if lrev != tiprev:
321 if lrev != tiprev:
322 self._updatebranchcache(partial, lrev+1, tiprev+1)
322 self._updatebranchcache(partial, lrev+1, tiprev+1)
323 self._writebranchcache(partial, self.changelog.tip(), tiprev)
323 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324
324
325 # the branch cache is stored on disk as UTF-8, but in the local
325 # the branch cache is stored on disk as UTF-8, but in the local
326 # charset internally
326 # charset internally
327 for k, v in partial.items():
327 for k, v in partial.items():
328 self.branchcache[util.tolocal(k)] = v
328 self.branchcache[util.tolocal(k)] = v
329 return self.branchcache
329 return self.branchcache
330
330
331 def _readbranchcache(self):
331 def _readbranchcache(self):
332 partial = {}
332 partial = {}
333 try:
333 try:
334 f = self.opener("branches.cache")
334 f = self.opener("branches.cache")
335 lines = f.read().split('\n')
335 lines = f.read().split('\n')
336 f.close()
336 f.close()
337 last, lrev = lines.pop(0).rstrip().split(" ", 1)
337 last, lrev = lines.pop(0).rstrip().split(" ", 1)
338 last, lrev = bin(last), int(lrev)
338 last, lrev = bin(last), int(lrev)
339 if not (lrev < self.changelog.count() and
339 if not (lrev < self.changelog.count() and
340 self.changelog.node(lrev) == last): # sanity check
340 self.changelog.node(lrev) == last): # sanity check
341 # invalidate the cache
341 # invalidate the cache
342 raise ValueError('Invalid branch cache: unknown tip')
342 raise ValueError('Invalid branch cache: unknown tip')
343 for l in lines:
343 for l in lines:
344 if not l: continue
344 if not l: continue
345 node, label = l.rstrip().split(" ", 1)
345 node, label = l.rstrip().split(" ", 1)
346 partial[label] = bin(node)
346 partial[label] = bin(node)
347 except (KeyboardInterrupt, util.SignalInterrupt):
347 except (KeyboardInterrupt, util.SignalInterrupt):
348 raise
348 raise
349 except Exception, inst:
349 except Exception, inst:
350 if self.ui.debugflag:
350 if self.ui.debugflag:
351 self.ui.warn(str(inst), '\n')
351 self.ui.warn(str(inst), '\n')
352 partial, last, lrev = {}, nullid, nullrev
352 partial, last, lrev = {}, nullid, nullrev
353 return partial, last, lrev
353 return partial, last, lrev
354
354
355 def _writebranchcache(self, branches, tip, tiprev):
355 def _writebranchcache(self, branches, tip, tiprev):
356 try:
356 try:
357 f = self.opener("branches.cache", "w")
357 f = self.opener("branches.cache", "w")
358 f.write("%s %s\n" % (hex(tip), tiprev))
358 f.write("%s %s\n" % (hex(tip), tiprev))
359 for label, node in branches.iteritems():
359 for label, node in branches.iteritems():
360 f.write("%s %s\n" % (hex(node), label))
360 f.write("%s %s\n" % (hex(node), label))
361 except IOError:
361 except IOError:
362 pass
362 pass
363
363
364 def _updatebranchcache(self, partial, start, end):
364 def _updatebranchcache(self, partial, start, end):
365 for r in xrange(start, end):
365 for r in xrange(start, end):
366 c = self.changectx(r)
366 c = self.changectx(r)
367 b = c.branch()
367 b = c.branch()
368 if b:
368 if b:
369 partial[b] = c.node()
369 partial[b] = c.node()
370
370
371 def lookup(self, key):
371 def lookup(self, key):
372 if key == '.':
372 if key == '.':
373 key = self.dirstate.parents()[0]
373 key = self.dirstate.parents()[0]
374 if key == nullid:
374 if key == nullid:
375 raise repo.RepoError(_("no revision checked out"))
375 raise repo.RepoError(_("no revision checked out"))
376 n = self.changelog._match(key)
376 n = self.changelog._match(key)
377 if n:
377 if n:
378 return n
378 return n
379 if key in self.tags():
379 if key in self.tags():
380 return self.tags()[key]
380 return self.tags()[key]
381 if key in self.branchtags():
381 if key in self.branchtags():
382 return self.branchtags()[key]
382 return self.branchtags()[key]
383 n = self.changelog._partialmatch(key)
383 n = self.changelog._partialmatch(key)
384 if n:
384 if n:
385 return n
385 return n
386 raise repo.RepoError(_("unknown revision '%s'") % key)
386 raise repo.RepoError(_("unknown revision '%s'") % key)
387
387
388 def dev(self):
388 def dev(self):
389 return os.lstat(self.path).st_dev
389 return os.lstat(self.path).st_dev
390
390
391 def local(self):
391 def local(self):
392 return True
392 return True
393
393
394 def join(self, f):
394 def join(self, f):
395 return os.path.join(self.path, f)
395 return os.path.join(self.path, f)
396
396
397 def sjoin(self, f):
397 def sjoin(self, f):
398 return os.path.join(self.path, f)
398 return os.path.join(self.path, f)
399
399
400 def wjoin(self, f):
400 def wjoin(self, f):
401 return os.path.join(self.root, f)
401 return os.path.join(self.root, f)
402
402
403 def file(self, f):
403 def file(self, f):
404 if f[0] == '/':
404 if f[0] == '/':
405 f = f[1:]
405 f = f[1:]
406 return filelog.filelog(self.sopener, f, self.revlogversion)
406 return filelog.filelog(self.sopener, f, self.revlogversion)
407
407
408 def changectx(self, changeid=None):
408 def changectx(self, changeid=None):
409 return context.changectx(self, changeid)
409 return context.changectx(self, changeid)
410
410
411 def workingctx(self):
411 def workingctx(self):
412 return context.workingctx(self)
412 return context.workingctx(self)
413
413
414 def parents(self, changeid=None):
414 def parents(self, changeid=None):
415 '''
415 '''
416 get list of changectxs for parents of changeid or working directory
416 get list of changectxs for parents of changeid or working directory
417 '''
417 '''
418 if changeid is None:
418 if changeid is None:
419 pl = self.dirstate.parents()
419 pl = self.dirstate.parents()
420 else:
420 else:
421 n = self.changelog.lookup(changeid)
421 n = self.changelog.lookup(changeid)
422 pl = self.changelog.parents(n)
422 pl = self.changelog.parents(n)
423 if pl[1] == nullid:
423 if pl[1] == nullid:
424 return [self.changectx(pl[0])]
424 return [self.changectx(pl[0])]
425 return [self.changectx(pl[0]), self.changectx(pl[1])]
425 return [self.changectx(pl[0]), self.changectx(pl[1])]
426
426
427 def filectx(self, path, changeid=None, fileid=None):
427 def filectx(self, path, changeid=None, fileid=None):
428 """changeid can be a changeset revision, node, or tag.
428 """changeid can be a changeset revision, node, or tag.
429 fileid can be a file revision or node."""
429 fileid can be a file revision or node."""
430 return context.filectx(self, path, changeid, fileid)
430 return context.filectx(self, path, changeid, fileid)
431
431
432 def getcwd(self):
432 def getcwd(self):
433 return self.dirstate.getcwd()
433 return self.dirstate.getcwd()
434
434
435 def wfile(self, f, mode='r'):
435 def wfile(self, f, mode='r'):
436 return self.wopener(f, mode)
436 return self.wopener(f, mode)
437
437
438 def wread(self, filename):
438 def wread(self, filename):
439 if self.encodepats == None:
439 if self.encodepats == None:
440 l = []
440 l = []
441 for pat, cmd in self.ui.configitems("encode"):
441 for pat, cmd in self.ui.configitems("encode"):
442 mf = util.matcher(self.root, "", [pat], [], [])[1]
442 mf = util.matcher(self.root, "", [pat], [], [])[1]
443 l.append((mf, cmd))
443 l.append((mf, cmd))
444 self.encodepats = l
444 self.encodepats = l
445
445
446 data = self.wopener(filename, 'r').read()
446 data = self.wopener(filename, 'r').read()
447
447
448 for mf, cmd in self.encodepats:
448 for mf, cmd in self.encodepats:
449 if mf(filename):
449 if mf(filename):
450 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
450 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
451 data = util.filter(data, cmd)
451 data = util.filter(data, cmd)
452 break
452 break
453
453
454 return data
454 return data
455
455
456 def wwrite(self, filename, data, fd=None):
456 def wwrite(self, filename, data, fd=None):
457 if self.decodepats == None:
457 if self.decodepats == None:
458 l = []
458 l = []
459 for pat, cmd in self.ui.configitems("decode"):
459 for pat, cmd in self.ui.configitems("decode"):
460 mf = util.matcher(self.root, "", [pat], [], [])[1]
460 mf = util.matcher(self.root, "", [pat], [], [])[1]
461 l.append((mf, cmd))
461 l.append((mf, cmd))
462 self.decodepats = l
462 self.decodepats = l
463
463
464 for mf, cmd in self.decodepats:
464 for mf, cmd in self.decodepats:
465 if mf(filename):
465 if mf(filename):
466 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
466 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
467 data = util.filter(data, cmd)
467 data = util.filter(data, cmd)
468 break
468 break
469
469
470 if fd:
470 if fd:
471 return fd.write(data)
471 return fd.write(data)
472 return self.wopener(filename, 'w').write(data)
472 return self.wopener(filename, 'w').write(data)
473
473
474 def transaction(self):
474 def transaction(self):
475 tr = self.transhandle
475 tr = self.transhandle
476 if tr != None and tr.running():
476 if tr != None and tr.running():
477 return tr.nest()
477 return tr.nest()
478
478
479 # save dirstate for rollback
479 # save dirstate for rollback
480 try:
480 try:
481 ds = self.opener("dirstate").read()
481 ds = self.opener("dirstate").read()
482 except IOError:
482 except IOError:
483 ds = ""
483 ds = ""
484 self.opener("journal.dirstate", "w").write(ds)
484 self.opener("journal.dirstate", "w").write(ds)
485
485
486 renames = [(self.sjoin("journal"), self.sjoin("undo")),
487 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
486 tr = transaction.transaction(self.ui.warn, self.sopener,
488 tr = transaction.transaction(self.ui.warn, self.sopener,
487 self.sjoin("journal"),
489 self.sjoin("journal"),
488 aftertrans(self.path))
490 aftertrans(renames))
489 self.transhandle = tr
491 self.transhandle = tr
490 return tr
492 return tr
491
493
492 def recover(self):
494 def recover(self):
493 l = self.lock()
495 l = self.lock()
494 if os.path.exists(self.sjoin("journal")):
496 if os.path.exists(self.sjoin("journal")):
495 self.ui.status(_("rolling back interrupted transaction\n"))
497 self.ui.status(_("rolling back interrupted transaction\n"))
496 transaction.rollback(self.sopener, self.sjoin("journal"))
498 transaction.rollback(self.sopener, self.sjoin("journal"))
497 self.reload()
499 self.reload()
498 return True
500 return True
499 else:
501 else:
500 self.ui.warn(_("no interrupted transaction available\n"))
502 self.ui.warn(_("no interrupted transaction available\n"))
501 return False
503 return False
502
504
503 def rollback(self, wlock=None):
505 def rollback(self, wlock=None):
504 if not wlock:
506 if not wlock:
505 wlock = self.wlock()
507 wlock = self.wlock()
506 l = self.lock()
508 l = self.lock()
507 if os.path.exists(self.sjoin("undo")):
509 if os.path.exists(self.sjoin("undo")):
508 self.ui.status(_("rolling back last transaction\n"))
510 self.ui.status(_("rolling back last transaction\n"))
509 transaction.rollback(self.sopener, self.sjoin("undo"))
511 transaction.rollback(self.sopener, self.sjoin("undo"))
510 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
512 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
511 self.reload()
513 self.reload()
512 self.wreload()
514 self.wreload()
513 else:
515 else:
514 self.ui.warn(_("no rollback information available\n"))
516 self.ui.warn(_("no rollback information available\n"))
515
517
516 def wreload(self):
518 def wreload(self):
517 self.dirstate.read()
519 self.dirstate.read()
518
520
519 def reload(self):
521 def reload(self):
520 self.changelog.load()
522 self.changelog.load()
521 self.manifest.load()
523 self.manifest.load()
522 self.tagscache = None
524 self.tagscache = None
523 self.nodetagscache = None
525 self.nodetagscache = None
524
526
525 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
527 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
526 desc=None):
528 desc=None):
527 try:
529 try:
528 l = lock.lock(lockname, 0, releasefn, desc=desc)
530 l = lock.lock(lockname, 0, releasefn, desc=desc)
529 except lock.LockHeld, inst:
531 except lock.LockHeld, inst:
530 if not wait:
532 if not wait:
531 raise
533 raise
532 self.ui.warn(_("waiting for lock on %s held by %r\n") %
534 self.ui.warn(_("waiting for lock on %s held by %r\n") %
533 (desc, inst.locker))
535 (desc, inst.locker))
534 # default to 600 seconds timeout
536 # default to 600 seconds timeout
535 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
537 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
536 releasefn, desc=desc)
538 releasefn, desc=desc)
537 if acquirefn:
539 if acquirefn:
538 acquirefn()
540 acquirefn()
539 return l
541 return l
540
542
541 def lock(self, wait=1):
543 def lock(self, wait=1):
542 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
544 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
543 desc=_('repository %s') % self.origroot)
545 desc=_('repository %s') % self.origroot)
544
546
545 def wlock(self, wait=1):
547 def wlock(self, wait=1):
546 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
548 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
547 self.wreload,
549 self.wreload,
548 desc=_('working directory of %s') % self.origroot)
550 desc=_('working directory of %s') % self.origroot)
549
551
550 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
552 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
551 """
553 """
552 commit an individual file as part of a larger transaction
554 commit an individual file as part of a larger transaction
553 """
555 """
554
556
555 t = self.wread(fn)
557 t = self.wread(fn)
556 fl = self.file(fn)
558 fl = self.file(fn)
557 fp1 = manifest1.get(fn, nullid)
559 fp1 = manifest1.get(fn, nullid)
558 fp2 = manifest2.get(fn, nullid)
560 fp2 = manifest2.get(fn, nullid)
559
561
560 meta = {}
562 meta = {}
561 cp = self.dirstate.copied(fn)
563 cp = self.dirstate.copied(fn)
562 if cp:
564 if cp:
563 meta["copy"] = cp
565 meta["copy"] = cp
564 if not manifest2: # not a branch merge
566 if not manifest2: # not a branch merge
565 meta["copyrev"] = hex(manifest1.get(cp, nullid))
567 meta["copyrev"] = hex(manifest1.get(cp, nullid))
566 fp2 = nullid
568 fp2 = nullid
567 elif fp2 != nullid: # copied on remote side
569 elif fp2 != nullid: # copied on remote side
568 meta["copyrev"] = hex(manifest1.get(cp, nullid))
570 meta["copyrev"] = hex(manifest1.get(cp, nullid))
569 elif fp1 != nullid: # copied on local side, reversed
571 elif fp1 != nullid: # copied on local side, reversed
570 meta["copyrev"] = hex(manifest2.get(cp))
572 meta["copyrev"] = hex(manifest2.get(cp))
571 fp2 = nullid
573 fp2 = nullid
572 else: # directory rename
574 else: # directory rename
573 meta["copyrev"] = hex(manifest1.get(cp, nullid))
575 meta["copyrev"] = hex(manifest1.get(cp, nullid))
574 self.ui.debug(_(" %s: copy %s:%s\n") %
576 self.ui.debug(_(" %s: copy %s:%s\n") %
575 (fn, cp, meta["copyrev"]))
577 (fn, cp, meta["copyrev"]))
576 fp1 = nullid
578 fp1 = nullid
577 elif fp2 != nullid:
579 elif fp2 != nullid:
578 # is one parent an ancestor of the other?
580 # is one parent an ancestor of the other?
579 fpa = fl.ancestor(fp1, fp2)
581 fpa = fl.ancestor(fp1, fp2)
580 if fpa == fp1:
582 if fpa == fp1:
581 fp1, fp2 = fp2, nullid
583 fp1, fp2 = fp2, nullid
582 elif fpa == fp2:
584 elif fpa == fp2:
583 fp2 = nullid
585 fp2 = nullid
584
586
585 # is the file unmodified from the parent? report existing entry
587 # is the file unmodified from the parent? report existing entry
586 if fp2 == nullid and not fl.cmp(fp1, t):
588 if fp2 == nullid and not fl.cmp(fp1, t):
587 return fp1
589 return fp1
588
590
589 changelist.append(fn)
591 changelist.append(fn)
590 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
592 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
591
593
592 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
594 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
593 if p1 is None:
595 if p1 is None:
594 p1, p2 = self.dirstate.parents()
596 p1, p2 = self.dirstate.parents()
595 return self.commit(files=files, text=text, user=user, date=date,
597 return self.commit(files=files, text=text, user=user, date=date,
596 p1=p1, p2=p2, wlock=wlock)
598 p1=p1, p2=p2, wlock=wlock)
597
599
598 def commit(self, files=None, text="", user=None, date=None,
600 def commit(self, files=None, text="", user=None, date=None,
599 match=util.always, force=False, lock=None, wlock=None,
601 match=util.always, force=False, lock=None, wlock=None,
600 force_editor=False, p1=None, p2=None, extra={}):
602 force_editor=False, p1=None, p2=None, extra={}):
601
603
602 commit = []
604 commit = []
603 remove = []
605 remove = []
604 changed = []
606 changed = []
605 use_dirstate = (p1 is None) # not rawcommit
607 use_dirstate = (p1 is None) # not rawcommit
606 extra = extra.copy()
608 extra = extra.copy()
607
609
608 if use_dirstate:
610 if use_dirstate:
609 if files:
611 if files:
610 for f in files:
612 for f in files:
611 s = self.dirstate.state(f)
613 s = self.dirstate.state(f)
612 if s in 'nmai':
614 if s in 'nmai':
613 commit.append(f)
615 commit.append(f)
614 elif s == 'r':
616 elif s == 'r':
615 remove.append(f)
617 remove.append(f)
616 else:
618 else:
617 self.ui.warn(_("%s not tracked!\n") % f)
619 self.ui.warn(_("%s not tracked!\n") % f)
618 else:
620 else:
619 changes = self.status(match=match)[:5]
621 changes = self.status(match=match)[:5]
620 modified, added, removed, deleted, unknown = changes
622 modified, added, removed, deleted, unknown = changes
621 commit = modified + added
623 commit = modified + added
622 remove = removed
624 remove = removed
623 else:
625 else:
624 commit = files
626 commit = files
625
627
626 if use_dirstate:
628 if use_dirstate:
627 p1, p2 = self.dirstate.parents()
629 p1, p2 = self.dirstate.parents()
628 update_dirstate = True
630 update_dirstate = True
629 else:
631 else:
630 p1, p2 = p1, p2 or nullid
632 p1, p2 = p1, p2 or nullid
631 update_dirstate = (self.dirstate.parents()[0] == p1)
633 update_dirstate = (self.dirstate.parents()[0] == p1)
632
634
633 c1 = self.changelog.read(p1)
635 c1 = self.changelog.read(p1)
634 c2 = self.changelog.read(p2)
636 c2 = self.changelog.read(p2)
635 m1 = self.manifest.read(c1[0]).copy()
637 m1 = self.manifest.read(c1[0]).copy()
636 m2 = self.manifest.read(c2[0])
638 m2 = self.manifest.read(c2[0])
637
639
638 if use_dirstate:
640 if use_dirstate:
639 branchname = util.fromlocal(self.workingctx().branch())
641 branchname = util.fromlocal(self.workingctx().branch())
640 else:
642 else:
641 branchname = ""
643 branchname = ""
642
644
643 if use_dirstate:
645 if use_dirstate:
644 oldname = c1[5].get("branch", "") # stored in UTF-8
646 oldname = c1[5].get("branch", "") # stored in UTF-8
645 if not commit and not remove and not force and p2 == nullid and \
647 if not commit and not remove and not force and p2 == nullid and \
646 branchname == oldname:
648 branchname == oldname:
647 self.ui.status(_("nothing changed\n"))
649 self.ui.status(_("nothing changed\n"))
648 return None
650 return None
649
651
650 xp1 = hex(p1)
652 xp1 = hex(p1)
651 if p2 == nullid: xp2 = ''
653 if p2 == nullid: xp2 = ''
652 else: xp2 = hex(p2)
654 else: xp2 = hex(p2)
653
655
654 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
656 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
655
657
656 if not wlock:
658 if not wlock:
657 wlock = self.wlock()
659 wlock = self.wlock()
658 if not lock:
660 if not lock:
659 lock = self.lock()
661 lock = self.lock()
660 tr = self.transaction()
662 tr = self.transaction()
661
663
662 # check in files
664 # check in files
663 new = {}
665 new = {}
664 linkrev = self.changelog.count()
666 linkrev = self.changelog.count()
665 commit.sort()
667 commit.sort()
666 for f in commit:
668 for f in commit:
667 self.ui.note(f + "\n")
669 self.ui.note(f + "\n")
668 try:
670 try:
669 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
671 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
670 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
672 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
671 except IOError:
673 except IOError:
672 if use_dirstate:
674 if use_dirstate:
673 self.ui.warn(_("trouble committing %s!\n") % f)
675 self.ui.warn(_("trouble committing %s!\n") % f)
674 raise
676 raise
675 else:
677 else:
676 remove.append(f)
678 remove.append(f)
677
679
678 # update manifest
680 # update manifest
679 m1.update(new)
681 m1.update(new)
680 remove.sort()
682 remove.sort()
681
683
682 for f in remove:
684 for f in remove:
683 if f in m1:
685 if f in m1:
684 del m1[f]
686 del m1[f]
685 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
687 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
686
688
687 # add changeset
689 # add changeset
688 new = new.keys()
690 new = new.keys()
689 new.sort()
691 new.sort()
690
692
691 user = user or self.ui.username()
693 user = user or self.ui.username()
692 if not text or force_editor:
694 if not text or force_editor:
693 edittext = []
695 edittext = []
694 if text:
696 if text:
695 edittext.append(text)
697 edittext.append(text)
696 edittext.append("")
698 edittext.append("")
697 edittext.append("HG: user: %s" % user)
699 edittext.append("HG: user: %s" % user)
698 if p2 != nullid:
700 if p2 != nullid:
699 edittext.append("HG: branch merge")
701 edittext.append("HG: branch merge")
700 edittext.extend(["HG: changed %s" % f for f in changed])
702 edittext.extend(["HG: changed %s" % f for f in changed])
701 edittext.extend(["HG: removed %s" % f for f in remove])
703 edittext.extend(["HG: removed %s" % f for f in remove])
702 if not changed and not remove:
704 if not changed and not remove:
703 edittext.append("HG: no files changed")
705 edittext.append("HG: no files changed")
704 edittext.append("")
706 edittext.append("")
705 # run editor in the repository root
707 # run editor in the repository root
706 olddir = os.getcwd()
708 olddir = os.getcwd()
707 os.chdir(self.root)
709 os.chdir(self.root)
708 text = self.ui.edit("\n".join(edittext), user)
710 text = self.ui.edit("\n".join(edittext), user)
709 os.chdir(olddir)
711 os.chdir(olddir)
710
712
711 lines = [line.rstrip() for line in text.rstrip().splitlines()]
713 lines = [line.rstrip() for line in text.rstrip().splitlines()]
712 while lines and not lines[0]:
714 while lines and not lines[0]:
713 del lines[0]
715 del lines[0]
714 if not lines:
716 if not lines:
715 return None
717 return None
716 text = '\n'.join(lines)
718 text = '\n'.join(lines)
717 if branchname:
719 if branchname:
718 extra["branch"] = branchname
720 extra["branch"] = branchname
719 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
721 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
720 user, date, extra)
722 user, date, extra)
721 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
723 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
722 parent2=xp2)
724 parent2=xp2)
723 tr.close()
725 tr.close()
724
726
725 if use_dirstate or update_dirstate:
727 if use_dirstate or update_dirstate:
726 self.dirstate.setparents(n)
728 self.dirstate.setparents(n)
727 if use_dirstate:
729 if use_dirstate:
728 self.dirstate.update(new, "n")
730 self.dirstate.update(new, "n")
729 self.dirstate.forget(remove)
731 self.dirstate.forget(remove)
730
732
731 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
733 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
732 return n
734 return n
733
735
734 def walk(self, node=None, files=[], match=util.always, badmatch=None):
736 def walk(self, node=None, files=[], match=util.always, badmatch=None):
735 '''
737 '''
736 walk recursively through the directory tree or a given
738 walk recursively through the directory tree or a given
737 changeset, finding all files matched by the match
739 changeset, finding all files matched by the match
738 function
740 function
739
741
740 results are yielded in a tuple (src, filename), where src
742 results are yielded in a tuple (src, filename), where src
741 is one of:
743 is one of:
742 'f' the file was found in the directory tree
744 'f' the file was found in the directory tree
743 'm' the file was only in the dirstate and not in the tree
745 'm' the file was only in the dirstate and not in the tree
744 'b' file was not found and matched badmatch
746 'b' file was not found and matched badmatch
745 '''
747 '''
746
748
747 if node:
749 if node:
748 fdict = dict.fromkeys(files)
750 fdict = dict.fromkeys(files)
749 for fn in self.manifest.read(self.changelog.read(node)[0]):
751 for fn in self.manifest.read(self.changelog.read(node)[0]):
750 for ffn in fdict:
752 for ffn in fdict:
751 # match if the file is the exact name or a directory
753 # match if the file is the exact name or a directory
752 if ffn == fn or fn.startswith("%s/" % ffn):
754 if ffn == fn or fn.startswith("%s/" % ffn):
753 del fdict[ffn]
755 del fdict[ffn]
754 break
756 break
755 if match(fn):
757 if match(fn):
756 yield 'm', fn
758 yield 'm', fn
757 for fn in fdict:
759 for fn in fdict:
758 if badmatch and badmatch(fn):
760 if badmatch and badmatch(fn):
759 if match(fn):
761 if match(fn):
760 yield 'b', fn
762 yield 'b', fn
761 else:
763 else:
762 self.ui.warn(_('%s: No such file in rev %s\n') % (
764 self.ui.warn(_('%s: No such file in rev %s\n') % (
763 util.pathto(self.getcwd(), fn), short(node)))
765 util.pathto(self.getcwd(), fn), short(node)))
764 else:
766 else:
765 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
767 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
766 yield src, fn
768 yield src, fn
767
769
768 def status(self, node1=None, node2=None, files=[], match=util.always,
770 def status(self, node1=None, node2=None, files=[], match=util.always,
769 wlock=None, list_ignored=False, list_clean=False):
771 wlock=None, list_ignored=False, list_clean=False):
770 """return status of files between two nodes or node and working directory
772 """return status of files between two nodes or node and working directory
771
773
772 If node1 is None, use the first dirstate parent instead.
774 If node1 is None, use the first dirstate parent instead.
773 If node2 is None, compare node1 with working directory.
775 If node2 is None, compare node1 with working directory.
774 """
776 """
775
777
776 def fcmp(fn, mf):
778 def fcmp(fn, mf):
777 t1 = self.wread(fn)
779 t1 = self.wread(fn)
778 return self.file(fn).cmp(mf.get(fn, nullid), t1)
780 return self.file(fn).cmp(mf.get(fn, nullid), t1)
779
781
780 def mfmatches(node):
782 def mfmatches(node):
781 change = self.changelog.read(node)
783 change = self.changelog.read(node)
782 mf = self.manifest.read(change[0]).copy()
784 mf = self.manifest.read(change[0]).copy()
783 for fn in mf.keys():
785 for fn in mf.keys():
784 if not match(fn):
786 if not match(fn):
785 del mf[fn]
787 del mf[fn]
786 return mf
788 return mf
787
789
788 modified, added, removed, deleted, unknown = [], [], [], [], []
790 modified, added, removed, deleted, unknown = [], [], [], [], []
789 ignored, clean = [], []
791 ignored, clean = [], []
790
792
791 compareworking = False
793 compareworking = False
792 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
794 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
793 compareworking = True
795 compareworking = True
794
796
795 if not compareworking:
797 if not compareworking:
796 # read the manifest from node1 before the manifest from node2,
798 # read the manifest from node1 before the manifest from node2,
797 # so that we'll hit the manifest cache if we're going through
799 # so that we'll hit the manifest cache if we're going through
798 # all the revisions in parent->child order.
800 # all the revisions in parent->child order.
799 mf1 = mfmatches(node1)
801 mf1 = mfmatches(node1)
800
802
801 # are we comparing the working directory?
803 # are we comparing the working directory?
802 if not node2:
804 if not node2:
803 if not wlock:
805 if not wlock:
804 try:
806 try:
805 wlock = self.wlock(wait=0)
807 wlock = self.wlock(wait=0)
806 except lock.LockException:
808 except lock.LockException:
807 wlock = None
809 wlock = None
808 (lookup, modified, added, removed, deleted, unknown,
810 (lookup, modified, added, removed, deleted, unknown,
809 ignored, clean) = self.dirstate.status(files, match,
811 ignored, clean) = self.dirstate.status(files, match,
810 list_ignored, list_clean)
812 list_ignored, list_clean)
811
813
812 # are we comparing working dir against its parent?
814 # are we comparing working dir against its parent?
813 if compareworking:
815 if compareworking:
814 if lookup:
816 if lookup:
815 # do a full compare of any files that might have changed
817 # do a full compare of any files that might have changed
816 mf2 = mfmatches(self.dirstate.parents()[0])
818 mf2 = mfmatches(self.dirstate.parents()[0])
817 for f in lookup:
819 for f in lookup:
818 if fcmp(f, mf2):
820 if fcmp(f, mf2):
819 modified.append(f)
821 modified.append(f)
820 else:
822 else:
821 clean.append(f)
823 clean.append(f)
822 if wlock is not None:
824 if wlock is not None:
823 self.dirstate.update([f], "n")
825 self.dirstate.update([f], "n")
824 else:
826 else:
825 # we are comparing working dir against non-parent
827 # we are comparing working dir against non-parent
826 # generate a pseudo-manifest for the working dir
828 # generate a pseudo-manifest for the working dir
827 # XXX: create it in dirstate.py ?
829 # XXX: create it in dirstate.py ?
828 mf2 = mfmatches(self.dirstate.parents()[0])
830 mf2 = mfmatches(self.dirstate.parents()[0])
829 for f in lookup + modified + added:
831 for f in lookup + modified + added:
830 mf2[f] = ""
832 mf2[f] = ""
831 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
833 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
832 for f in removed:
834 for f in removed:
833 if f in mf2:
835 if f in mf2:
834 del mf2[f]
836 del mf2[f]
835 else:
837 else:
836 # we are comparing two revisions
838 # we are comparing two revisions
837 mf2 = mfmatches(node2)
839 mf2 = mfmatches(node2)
838
840
839 if not compareworking:
841 if not compareworking:
840 # flush lists from dirstate before comparing manifests
842 # flush lists from dirstate before comparing manifests
841 modified, added, clean = [], [], []
843 modified, added, clean = [], [], []
842
844
843 # make sure to sort the files so we talk to the disk in a
845 # make sure to sort the files so we talk to the disk in a
844 # reasonable order
846 # reasonable order
845 mf2keys = mf2.keys()
847 mf2keys = mf2.keys()
846 mf2keys.sort()
848 mf2keys.sort()
847 for fn in mf2keys:
849 for fn in mf2keys:
848 if mf1.has_key(fn):
850 if mf1.has_key(fn):
849 if mf1.flags(fn) != mf2.flags(fn) or \
851 if mf1.flags(fn) != mf2.flags(fn) or \
850 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
852 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
851 modified.append(fn)
853 modified.append(fn)
852 elif list_clean:
854 elif list_clean:
853 clean.append(fn)
855 clean.append(fn)
854 del mf1[fn]
856 del mf1[fn]
855 else:
857 else:
856 added.append(fn)
858 added.append(fn)
857
859
858 removed = mf1.keys()
860 removed = mf1.keys()
859
861
860 # sort and return results:
862 # sort and return results:
861 for l in modified, added, removed, deleted, unknown, ignored, clean:
863 for l in modified, added, removed, deleted, unknown, ignored, clean:
862 l.sort()
864 l.sort()
863 return (modified, added, removed, deleted, unknown, ignored, clean)
865 return (modified, added, removed, deleted, unknown, ignored, clean)
864
866
865 def add(self, list, wlock=None):
867 def add(self, list, wlock=None):
866 if not wlock:
868 if not wlock:
867 wlock = self.wlock()
869 wlock = self.wlock()
868 for f in list:
870 for f in list:
869 p = self.wjoin(f)
871 p = self.wjoin(f)
870 if not os.path.exists(p):
872 if not os.path.exists(p):
871 self.ui.warn(_("%s does not exist!\n") % f)
873 self.ui.warn(_("%s does not exist!\n") % f)
872 elif not os.path.isfile(p):
874 elif not os.path.isfile(p):
873 self.ui.warn(_("%s not added: only files supported currently\n")
875 self.ui.warn(_("%s not added: only files supported currently\n")
874 % f)
876 % f)
875 elif self.dirstate.state(f) in 'an':
877 elif self.dirstate.state(f) in 'an':
876 self.ui.warn(_("%s already tracked!\n") % f)
878 self.ui.warn(_("%s already tracked!\n") % f)
877 else:
879 else:
878 self.dirstate.update([f], "a")
880 self.dirstate.update([f], "a")
879
881
880 def forget(self, list, wlock=None):
882 def forget(self, list, wlock=None):
881 if not wlock:
883 if not wlock:
882 wlock = self.wlock()
884 wlock = self.wlock()
883 for f in list:
885 for f in list:
884 if self.dirstate.state(f) not in 'ai':
886 if self.dirstate.state(f) not in 'ai':
885 self.ui.warn(_("%s not added!\n") % f)
887 self.ui.warn(_("%s not added!\n") % f)
886 else:
888 else:
887 self.dirstate.forget([f])
889 self.dirstate.forget([f])
888
890
889 def remove(self, list, unlink=False, wlock=None):
891 def remove(self, list, unlink=False, wlock=None):
890 if unlink:
892 if unlink:
891 for f in list:
893 for f in list:
892 try:
894 try:
893 util.unlink(self.wjoin(f))
895 util.unlink(self.wjoin(f))
894 except OSError, inst:
896 except OSError, inst:
895 if inst.errno != errno.ENOENT:
897 if inst.errno != errno.ENOENT:
896 raise
898 raise
897 if not wlock:
899 if not wlock:
898 wlock = self.wlock()
900 wlock = self.wlock()
899 for f in list:
901 for f in list:
900 p = self.wjoin(f)
902 p = self.wjoin(f)
901 if os.path.exists(p):
903 if os.path.exists(p):
902 self.ui.warn(_("%s still exists!\n") % f)
904 self.ui.warn(_("%s still exists!\n") % f)
903 elif self.dirstate.state(f) == 'a':
905 elif self.dirstate.state(f) == 'a':
904 self.dirstate.forget([f])
906 self.dirstate.forget([f])
905 elif f not in self.dirstate:
907 elif f not in self.dirstate:
906 self.ui.warn(_("%s not tracked!\n") % f)
908 self.ui.warn(_("%s not tracked!\n") % f)
907 else:
909 else:
908 self.dirstate.update([f], "r")
910 self.dirstate.update([f], "r")
909
911
910 def undelete(self, list, wlock=None):
912 def undelete(self, list, wlock=None):
911 p = self.dirstate.parents()[0]
913 p = self.dirstate.parents()[0]
912 mn = self.changelog.read(p)[0]
914 mn = self.changelog.read(p)[0]
913 m = self.manifest.read(mn)
915 m = self.manifest.read(mn)
914 if not wlock:
916 if not wlock:
915 wlock = self.wlock()
917 wlock = self.wlock()
916 for f in list:
918 for f in list:
917 if self.dirstate.state(f) not in "r":
919 if self.dirstate.state(f) not in "r":
918 self.ui.warn("%s not removed!\n" % f)
920 self.ui.warn("%s not removed!\n" % f)
919 else:
921 else:
920 t = self.file(f).read(m[f])
922 t = self.file(f).read(m[f])
921 self.wwrite(f, t)
923 self.wwrite(f, t)
922 util.set_exec(self.wjoin(f), m.execf(f))
924 util.set_exec(self.wjoin(f), m.execf(f))
923 self.dirstate.update([f], "n")
925 self.dirstate.update([f], "n")
924
926
925 def copy(self, source, dest, wlock=None):
927 def copy(self, source, dest, wlock=None):
926 p = self.wjoin(dest)
928 p = self.wjoin(dest)
927 if not os.path.exists(p):
929 if not os.path.exists(p):
928 self.ui.warn(_("%s does not exist!\n") % dest)
930 self.ui.warn(_("%s does not exist!\n") % dest)
929 elif not os.path.isfile(p):
931 elif not os.path.isfile(p):
930 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
932 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
931 else:
933 else:
932 if not wlock:
934 if not wlock:
933 wlock = self.wlock()
935 wlock = self.wlock()
934 if self.dirstate.state(dest) == '?':
936 if self.dirstate.state(dest) == '?':
935 self.dirstate.update([dest], "a")
937 self.dirstate.update([dest], "a")
936 self.dirstate.copy(source, dest)
938 self.dirstate.copy(source, dest)
937
939
938 def heads(self, start=None):
940 def heads(self, start=None):
939 heads = self.changelog.heads(start)
941 heads = self.changelog.heads(start)
940 # sort the output in rev descending order
942 # sort the output in rev descending order
941 heads = [(-self.changelog.rev(h), h) for h in heads]
943 heads = [(-self.changelog.rev(h), h) for h in heads]
942 heads.sort()
944 heads.sort()
943 return [n for (r, n) in heads]
945 return [n for (r, n) in heads]
944
946
945 # branchlookup returns a dict giving a list of branches for
947 # branchlookup returns a dict giving a list of branches for
946 # each head. A branch is defined as the tag of a node or
948 # each head. A branch is defined as the tag of a node or
947 # the branch of the node's parents. If a node has multiple
949 # the branch of the node's parents. If a node has multiple
948 # branch tags, tags are eliminated if they are visible from other
950 # branch tags, tags are eliminated if they are visible from other
949 # branch tags.
951 # branch tags.
950 #
952 #
951 # So, for this graph: a->b->c->d->e
953 # So, for this graph: a->b->c->d->e
952 # \ /
954 # \ /
953 # aa -----/
955 # aa -----/
954 # a has tag 2.6.12
956 # a has tag 2.6.12
955 # d has tag 2.6.13
957 # d has tag 2.6.13
956 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
958 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
957 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
959 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
958 # from the list.
960 # from the list.
959 #
961 #
960 # It is possible that more than one head will have the same branch tag.
962 # It is possible that more than one head will have the same branch tag.
961 # callers need to check the result for multiple heads under the same
963 # callers need to check the result for multiple heads under the same
962 # branch tag if that is a problem for them (ie checkout of a specific
964 # branch tag if that is a problem for them (ie checkout of a specific
963 # branch).
965 # branch).
964 #
966 #
965 # passing in a specific branch will limit the depth of the search
967 # passing in a specific branch will limit the depth of the search
966 # through the parents. It won't limit the branches returned in the
968 # through the parents. It won't limit the branches returned in the
967 # result though.
969 # result though.
968 def branchlookup(self, heads=None, branch=None):
970 def branchlookup(self, heads=None, branch=None):
969 if not heads:
971 if not heads:
970 heads = self.heads()
972 heads = self.heads()
971 headt = [ h for h in heads ]
973 headt = [ h for h in heads ]
972 chlog = self.changelog
974 chlog = self.changelog
973 branches = {}
975 branches = {}
974 merges = []
976 merges = []
975 seenmerge = {}
977 seenmerge = {}
976
978
977 # traverse the tree once for each head, recording in the branches
979 # traverse the tree once for each head, recording in the branches
978 # dict which tags are visible from this head. The branches
980 # dict which tags are visible from this head. The branches
979 # dict also records which tags are visible from each tag
981 # dict also records which tags are visible from each tag
980 # while we traverse.
982 # while we traverse.
981 while headt or merges:
983 while headt or merges:
982 if merges:
984 if merges:
983 n, found = merges.pop()
985 n, found = merges.pop()
984 visit = [n]
986 visit = [n]
985 else:
987 else:
986 h = headt.pop()
988 h = headt.pop()
987 visit = [h]
989 visit = [h]
988 found = [h]
990 found = [h]
989 seen = {}
991 seen = {}
990 while visit:
992 while visit:
991 n = visit.pop()
993 n = visit.pop()
992 if n in seen:
994 if n in seen:
993 continue
995 continue
994 pp = chlog.parents(n)
996 pp = chlog.parents(n)
995 tags = self.nodetags(n)
997 tags = self.nodetags(n)
996 if tags:
998 if tags:
997 for x in tags:
999 for x in tags:
998 if x == 'tip':
1000 if x == 'tip':
999 continue
1001 continue
1000 for f in found:
1002 for f in found:
1001 branches.setdefault(f, {})[n] = 1
1003 branches.setdefault(f, {})[n] = 1
1002 branches.setdefault(n, {})[n] = 1
1004 branches.setdefault(n, {})[n] = 1
1003 break
1005 break
1004 if n not in found:
1006 if n not in found:
1005 found.append(n)
1007 found.append(n)
1006 if branch in tags:
1008 if branch in tags:
1007 continue
1009 continue
1008 seen[n] = 1
1010 seen[n] = 1
1009 if pp[1] != nullid and n not in seenmerge:
1011 if pp[1] != nullid and n not in seenmerge:
1010 merges.append((pp[1], [x for x in found]))
1012 merges.append((pp[1], [x for x in found]))
1011 seenmerge[n] = 1
1013 seenmerge[n] = 1
1012 if pp[0] != nullid:
1014 if pp[0] != nullid:
1013 visit.append(pp[0])
1015 visit.append(pp[0])
1014 # traverse the branches dict, eliminating branch tags from each
1016 # traverse the branches dict, eliminating branch tags from each
1015 # head that are visible from another branch tag for that head.
1017 # head that are visible from another branch tag for that head.
1016 out = {}
1018 out = {}
1017 viscache = {}
1019 viscache = {}
1018 for h in heads:
1020 for h in heads:
1019 def visible(node):
1021 def visible(node):
1020 if node in viscache:
1022 if node in viscache:
1021 return viscache[node]
1023 return viscache[node]
1022 ret = {}
1024 ret = {}
1023 visit = [node]
1025 visit = [node]
1024 while visit:
1026 while visit:
1025 x = visit.pop()
1027 x = visit.pop()
1026 if x in viscache:
1028 if x in viscache:
1027 ret.update(viscache[x])
1029 ret.update(viscache[x])
1028 elif x not in ret:
1030 elif x not in ret:
1029 ret[x] = 1
1031 ret[x] = 1
1030 if x in branches:
1032 if x in branches:
1031 visit[len(visit):] = branches[x].keys()
1033 visit[len(visit):] = branches[x].keys()
1032 viscache[node] = ret
1034 viscache[node] = ret
1033 return ret
1035 return ret
1034 if h not in branches:
1036 if h not in branches:
1035 continue
1037 continue
1036 # O(n^2), but somewhat limited. This only searches the
1038 # O(n^2), but somewhat limited. This only searches the
1037 # tags visible from a specific head, not all the tags in the
1039 # tags visible from a specific head, not all the tags in the
1038 # whole repo.
1040 # whole repo.
1039 for b in branches[h]:
1041 for b in branches[h]:
1040 vis = False
1042 vis = False
1041 for bb in branches[h].keys():
1043 for bb in branches[h].keys():
1042 if b != bb:
1044 if b != bb:
1043 if b in visible(bb):
1045 if b in visible(bb):
1044 vis = True
1046 vis = True
1045 break
1047 break
1046 if not vis:
1048 if not vis:
1047 l = out.setdefault(h, [])
1049 l = out.setdefault(h, [])
1048 l[len(l):] = self.nodetags(b)
1050 l[len(l):] = self.nodetags(b)
1049 return out
1051 return out
1050
1052
1051 def branches(self, nodes):
1053 def branches(self, nodes):
1052 if not nodes:
1054 if not nodes:
1053 nodes = [self.changelog.tip()]
1055 nodes = [self.changelog.tip()]
1054 b = []
1056 b = []
1055 for n in nodes:
1057 for n in nodes:
1056 t = n
1058 t = n
1057 while 1:
1059 while 1:
1058 p = self.changelog.parents(n)
1060 p = self.changelog.parents(n)
1059 if p[1] != nullid or p[0] == nullid:
1061 if p[1] != nullid or p[0] == nullid:
1060 b.append((t, n, p[0], p[1]))
1062 b.append((t, n, p[0], p[1]))
1061 break
1063 break
1062 n = p[0]
1064 n = p[0]
1063 return b
1065 return b
1064
1066
1065 def between(self, pairs):
1067 def between(self, pairs):
1066 r = []
1068 r = []
1067
1069
1068 for top, bottom in pairs:
1070 for top, bottom in pairs:
1069 n, l, i = top, [], 0
1071 n, l, i = top, [], 0
1070 f = 1
1072 f = 1
1071
1073
1072 while n != bottom:
1074 while n != bottom:
1073 p = self.changelog.parents(n)[0]
1075 p = self.changelog.parents(n)[0]
1074 if i == f:
1076 if i == f:
1075 l.append(n)
1077 l.append(n)
1076 f = f * 2
1078 f = f * 2
1077 n = p
1079 n = p
1078 i += 1
1080 i += 1
1079
1081
1080 r.append(l)
1082 r.append(l)
1081
1083
1082 return r
1084 return r
1083
1085
1084 def findincoming(self, remote, base=None, heads=None, force=False):
1086 def findincoming(self, remote, base=None, heads=None, force=False):
1085 """Return list of roots of the subsets of missing nodes from remote
1087 """Return list of roots of the subsets of missing nodes from remote
1086
1088
1087 If base dict is specified, assume that these nodes and their parents
1089 If base dict is specified, assume that these nodes and their parents
1088 exist on the remote side and that no child of a node of base exists
1090 exist on the remote side and that no child of a node of base exists
1089 in both remote and self.
1091 in both remote and self.
1090 Furthermore base will be updated to include the nodes that exists
1092 Furthermore base will be updated to include the nodes that exists
1091 in self and remote but no children exists in self and remote.
1093 in self and remote but no children exists in self and remote.
1092 If a list of heads is specified, return only nodes which are heads
1094 If a list of heads is specified, return only nodes which are heads
1093 or ancestors of these heads.
1095 or ancestors of these heads.
1094
1096
1095 All the ancestors of base are in self and in remote.
1097 All the ancestors of base are in self and in remote.
1096 All the descendants of the list returned are missing in self.
1098 All the descendants of the list returned are missing in self.
1097 (and so we know that the rest of the nodes are missing in remote, see
1099 (and so we know that the rest of the nodes are missing in remote, see
1098 outgoing)
1100 outgoing)
1099 """
1101 """
1100 m = self.changelog.nodemap
1102 m = self.changelog.nodemap
1101 search = []
1103 search = []
1102 fetch = {}
1104 fetch = {}
1103 seen = {}
1105 seen = {}
1104 seenbranch = {}
1106 seenbranch = {}
1105 if base == None:
1107 if base == None:
1106 base = {}
1108 base = {}
1107
1109
1108 if not heads:
1110 if not heads:
1109 heads = remote.heads()
1111 heads = remote.heads()
1110
1112
1111 if self.changelog.tip() == nullid:
1113 if self.changelog.tip() == nullid:
1112 base[nullid] = 1
1114 base[nullid] = 1
1113 if heads != [nullid]:
1115 if heads != [nullid]:
1114 return [nullid]
1116 return [nullid]
1115 return []
1117 return []
1116
1118
1117 # assume we're closer to the tip than the root
1119 # assume we're closer to the tip than the root
1118 # and start by examining the heads
1120 # and start by examining the heads
1119 self.ui.status(_("searching for changes\n"))
1121 self.ui.status(_("searching for changes\n"))
1120
1122
1121 unknown = []
1123 unknown = []
1122 for h in heads:
1124 for h in heads:
1123 if h not in m:
1125 if h not in m:
1124 unknown.append(h)
1126 unknown.append(h)
1125 else:
1127 else:
1126 base[h] = 1
1128 base[h] = 1
1127
1129
1128 if not unknown:
1130 if not unknown:
1129 return []
1131 return []
1130
1132
1131 req = dict.fromkeys(unknown)
1133 req = dict.fromkeys(unknown)
1132 reqcnt = 0
1134 reqcnt = 0
1133
1135
1134 # search through remote branches
1136 # search through remote branches
1135 # a 'branch' here is a linear segment of history, with four parts:
1137 # a 'branch' here is a linear segment of history, with four parts:
1136 # head, root, first parent, second parent
1138 # head, root, first parent, second parent
1137 # (a branch always has two parents (or none) by definition)
1139 # (a branch always has two parents (or none) by definition)
1138 unknown = remote.branches(unknown)
1140 unknown = remote.branches(unknown)
1139 while unknown:
1141 while unknown:
1140 r = []
1142 r = []
1141 while unknown:
1143 while unknown:
1142 n = unknown.pop(0)
1144 n = unknown.pop(0)
1143 if n[0] in seen:
1145 if n[0] in seen:
1144 continue
1146 continue
1145
1147
1146 self.ui.debug(_("examining %s:%s\n")
1148 self.ui.debug(_("examining %s:%s\n")
1147 % (short(n[0]), short(n[1])))
1149 % (short(n[0]), short(n[1])))
1148 if n[0] == nullid: # found the end of the branch
1150 if n[0] == nullid: # found the end of the branch
1149 pass
1151 pass
1150 elif n in seenbranch:
1152 elif n in seenbranch:
1151 self.ui.debug(_("branch already found\n"))
1153 self.ui.debug(_("branch already found\n"))
1152 continue
1154 continue
1153 elif n[1] and n[1] in m: # do we know the base?
1155 elif n[1] and n[1] in m: # do we know the base?
1154 self.ui.debug(_("found incomplete branch %s:%s\n")
1156 self.ui.debug(_("found incomplete branch %s:%s\n")
1155 % (short(n[0]), short(n[1])))
1157 % (short(n[0]), short(n[1])))
1156 search.append(n) # schedule branch range for scanning
1158 search.append(n) # schedule branch range for scanning
1157 seenbranch[n] = 1
1159 seenbranch[n] = 1
1158 else:
1160 else:
1159 if n[1] not in seen and n[1] not in fetch:
1161 if n[1] not in seen and n[1] not in fetch:
1160 if n[2] in m and n[3] in m:
1162 if n[2] in m and n[3] in m:
1161 self.ui.debug(_("found new changeset %s\n") %
1163 self.ui.debug(_("found new changeset %s\n") %
1162 short(n[1]))
1164 short(n[1]))
1163 fetch[n[1]] = 1 # earliest unknown
1165 fetch[n[1]] = 1 # earliest unknown
1164 for p in n[2:4]:
1166 for p in n[2:4]:
1165 if p in m:
1167 if p in m:
1166 base[p] = 1 # latest known
1168 base[p] = 1 # latest known
1167
1169
1168 for p in n[2:4]:
1170 for p in n[2:4]:
1169 if p not in req and p not in m:
1171 if p not in req and p not in m:
1170 r.append(p)
1172 r.append(p)
1171 req[p] = 1
1173 req[p] = 1
1172 seen[n[0]] = 1
1174 seen[n[0]] = 1
1173
1175
1174 if r:
1176 if r:
1175 reqcnt += 1
1177 reqcnt += 1
1176 self.ui.debug(_("request %d: %s\n") %
1178 self.ui.debug(_("request %d: %s\n") %
1177 (reqcnt, " ".join(map(short, r))))
1179 (reqcnt, " ".join(map(short, r))))
1178 for p in xrange(0, len(r), 10):
1180 for p in xrange(0, len(r), 10):
1179 for b in remote.branches(r[p:p+10]):
1181 for b in remote.branches(r[p:p+10]):
1180 self.ui.debug(_("received %s:%s\n") %
1182 self.ui.debug(_("received %s:%s\n") %
1181 (short(b[0]), short(b[1])))
1183 (short(b[0]), short(b[1])))
1182 unknown.append(b)
1184 unknown.append(b)
1183
1185
1184 # do binary search on the branches we found
1186 # do binary search on the branches we found
1185 while search:
1187 while search:
1186 n = search.pop(0)
1188 n = search.pop(0)
1187 reqcnt += 1
1189 reqcnt += 1
1188 l = remote.between([(n[0], n[1])])[0]
1190 l = remote.between([(n[0], n[1])])[0]
1189 l.append(n[1])
1191 l.append(n[1])
1190 p = n[0]
1192 p = n[0]
1191 f = 1
1193 f = 1
1192 for i in l:
1194 for i in l:
1193 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1195 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1194 if i in m:
1196 if i in m:
1195 if f <= 2:
1197 if f <= 2:
1196 self.ui.debug(_("found new branch changeset %s\n") %
1198 self.ui.debug(_("found new branch changeset %s\n") %
1197 short(p))
1199 short(p))
1198 fetch[p] = 1
1200 fetch[p] = 1
1199 base[i] = 1
1201 base[i] = 1
1200 else:
1202 else:
1201 self.ui.debug(_("narrowed branch search to %s:%s\n")
1203 self.ui.debug(_("narrowed branch search to %s:%s\n")
1202 % (short(p), short(i)))
1204 % (short(p), short(i)))
1203 search.append((p, i))
1205 search.append((p, i))
1204 break
1206 break
1205 p, f = i, f * 2
1207 p, f = i, f * 2
1206
1208
1207 # sanity check our fetch list
1209 # sanity check our fetch list
1208 for f in fetch.keys():
1210 for f in fetch.keys():
1209 if f in m:
1211 if f in m:
1210 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1212 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1211
1213
1212 if base.keys() == [nullid]:
1214 if base.keys() == [nullid]:
1213 if force:
1215 if force:
1214 self.ui.warn(_("warning: repository is unrelated\n"))
1216 self.ui.warn(_("warning: repository is unrelated\n"))
1215 else:
1217 else:
1216 raise util.Abort(_("repository is unrelated"))
1218 raise util.Abort(_("repository is unrelated"))
1217
1219
1218 self.ui.debug(_("found new changesets starting at ") +
1220 self.ui.debug(_("found new changesets starting at ") +
1219 " ".join([short(f) for f in fetch]) + "\n")
1221 " ".join([short(f) for f in fetch]) + "\n")
1220
1222
1221 self.ui.debug(_("%d total queries\n") % reqcnt)
1223 self.ui.debug(_("%d total queries\n") % reqcnt)
1222
1224
1223 return fetch.keys()
1225 return fetch.keys()
1224
1226
1225 def findoutgoing(self, remote, base=None, heads=None, force=False):
1227 def findoutgoing(self, remote, base=None, heads=None, force=False):
1226 """Return list of nodes that are roots of subsets not in remote
1228 """Return list of nodes that are roots of subsets not in remote
1227
1229
1228 If base dict is specified, assume that these nodes and their parents
1230 If base dict is specified, assume that these nodes and their parents
1229 exist on the remote side.
1231 exist on the remote side.
1230 If a list of heads is specified, return only nodes which are heads
1232 If a list of heads is specified, return only nodes which are heads
1231 or ancestors of these heads, and return a second element which
1233 or ancestors of these heads, and return a second element which
1232 contains all remote heads which get new children.
1234 contains all remote heads which get new children.
1233 """
1235 """
1234 if base == None:
1236 if base == None:
1235 base = {}
1237 base = {}
1236 self.findincoming(remote, base, heads, force=force)
1238 self.findincoming(remote, base, heads, force=force)
1237
1239
1238 self.ui.debug(_("common changesets up to ")
1240 self.ui.debug(_("common changesets up to ")
1239 + " ".join(map(short, base.keys())) + "\n")
1241 + " ".join(map(short, base.keys())) + "\n")
1240
1242
1241 remain = dict.fromkeys(self.changelog.nodemap)
1243 remain = dict.fromkeys(self.changelog.nodemap)
1242
1244
1243 # prune everything remote has from the tree
1245 # prune everything remote has from the tree
1244 del remain[nullid]
1246 del remain[nullid]
1245 remove = base.keys()
1247 remove = base.keys()
1246 while remove:
1248 while remove:
1247 n = remove.pop(0)
1249 n = remove.pop(0)
1248 if n in remain:
1250 if n in remain:
1249 del remain[n]
1251 del remain[n]
1250 for p in self.changelog.parents(n):
1252 for p in self.changelog.parents(n):
1251 remove.append(p)
1253 remove.append(p)
1252
1254
1253 # find every node whose parents have been pruned
1255 # find every node whose parents have been pruned
1254 subset = []
1256 subset = []
1255 # find every remote head that will get new children
1257 # find every remote head that will get new children
1256 updated_heads = {}
1258 updated_heads = {}
1257 for n in remain:
1259 for n in remain:
1258 p1, p2 = self.changelog.parents(n)
1260 p1, p2 = self.changelog.parents(n)
1259 if p1 not in remain and p2 not in remain:
1261 if p1 not in remain and p2 not in remain:
1260 subset.append(n)
1262 subset.append(n)
1261 if heads:
1263 if heads:
1262 if p1 in heads:
1264 if p1 in heads:
1263 updated_heads[p1] = True
1265 updated_heads[p1] = True
1264 if p2 in heads:
1266 if p2 in heads:
1265 updated_heads[p2] = True
1267 updated_heads[p2] = True
1266
1268
1267 # this is the set of all roots we have to push
1269 # this is the set of all roots we have to push
1268 if heads:
1270 if heads:
1269 return subset, updated_heads.keys()
1271 return subset, updated_heads.keys()
1270 else:
1272 else:
1271 return subset
1273 return subset
1272
1274
1273 def pull(self, remote, heads=None, force=False, lock=None):
1275 def pull(self, remote, heads=None, force=False, lock=None):
1274 mylock = False
1276 mylock = False
1275 if not lock:
1277 if not lock:
1276 lock = self.lock()
1278 lock = self.lock()
1277 mylock = True
1279 mylock = True
1278
1280
1279 try:
1281 try:
1280 fetch = self.findincoming(remote, force=force)
1282 fetch = self.findincoming(remote, force=force)
1281 if fetch == [nullid]:
1283 if fetch == [nullid]:
1282 self.ui.status(_("requesting all changes\n"))
1284 self.ui.status(_("requesting all changes\n"))
1283
1285
1284 if not fetch:
1286 if not fetch:
1285 self.ui.status(_("no changes found\n"))
1287 self.ui.status(_("no changes found\n"))
1286 return 0
1288 return 0
1287
1289
1288 if heads is None:
1290 if heads is None:
1289 cg = remote.changegroup(fetch, 'pull')
1291 cg = remote.changegroup(fetch, 'pull')
1290 else:
1292 else:
1291 if 'changegroupsubset' not in remote.capabilities:
1293 if 'changegroupsubset' not in remote.capabilities:
1292 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1294 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1293 cg = remote.changegroupsubset(fetch, heads, 'pull')
1295 cg = remote.changegroupsubset(fetch, heads, 'pull')
1294 return self.addchangegroup(cg, 'pull', remote.url())
1296 return self.addchangegroup(cg, 'pull', remote.url())
1295 finally:
1297 finally:
1296 if mylock:
1298 if mylock:
1297 lock.release()
1299 lock.release()
1298
1300
1299 def push(self, remote, force=False, revs=None):
1301 def push(self, remote, force=False, revs=None):
1300 # there are two ways to push to remote repo:
1302 # there are two ways to push to remote repo:
1301 #
1303 #
1302 # addchangegroup assumes local user can lock remote
1304 # addchangegroup assumes local user can lock remote
1303 # repo (local filesystem, old ssh servers).
1305 # repo (local filesystem, old ssh servers).
1304 #
1306 #
1305 # unbundle assumes local user cannot lock remote repo (new ssh
1307 # unbundle assumes local user cannot lock remote repo (new ssh
1306 # servers, http servers).
1308 # servers, http servers).
1307
1309
1308 if remote.capable('unbundle'):
1310 if remote.capable('unbundle'):
1309 return self.push_unbundle(remote, force, revs)
1311 return self.push_unbundle(remote, force, revs)
1310 return self.push_addchangegroup(remote, force, revs)
1312 return self.push_addchangegroup(remote, force, revs)
1311
1313
1312 def prepush(self, remote, force, revs):
1314 def prepush(self, remote, force, revs):
1313 base = {}
1315 base = {}
1314 remote_heads = remote.heads()
1316 remote_heads = remote.heads()
1315 inc = self.findincoming(remote, base, remote_heads, force=force)
1317 inc = self.findincoming(remote, base, remote_heads, force=force)
1316
1318
1317 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1319 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1318 if revs is not None:
1320 if revs is not None:
1319 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1321 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1320 else:
1322 else:
1321 bases, heads = update, self.changelog.heads()
1323 bases, heads = update, self.changelog.heads()
1322
1324
1323 if not bases:
1325 if not bases:
1324 self.ui.status(_("no changes found\n"))
1326 self.ui.status(_("no changes found\n"))
1325 return None, 1
1327 return None, 1
1326 elif not force:
1328 elif not force:
1327 # check if we're creating new remote heads
1329 # check if we're creating new remote heads
1328 # to be a remote head after push, node must be either
1330 # to be a remote head after push, node must be either
1329 # - unknown locally
1331 # - unknown locally
1330 # - a local outgoing head descended from update
1332 # - a local outgoing head descended from update
1331 # - a remote head that's known locally and not
1333 # - a remote head that's known locally and not
1332 # ancestral to an outgoing head
1334 # ancestral to an outgoing head
1333
1335
1334 warn = 0
1336 warn = 0
1335
1337
1336 if remote_heads == [nullid]:
1338 if remote_heads == [nullid]:
1337 warn = 0
1339 warn = 0
1338 elif not revs and len(heads) > len(remote_heads):
1340 elif not revs and len(heads) > len(remote_heads):
1339 warn = 1
1341 warn = 1
1340 else:
1342 else:
1341 newheads = list(heads)
1343 newheads = list(heads)
1342 for r in remote_heads:
1344 for r in remote_heads:
1343 if r in self.changelog.nodemap:
1345 if r in self.changelog.nodemap:
1344 desc = self.changelog.heads(r)
1346 desc = self.changelog.heads(r)
1345 l = [h for h in heads if h in desc]
1347 l = [h for h in heads if h in desc]
1346 if not l:
1348 if not l:
1347 newheads.append(r)
1349 newheads.append(r)
1348 else:
1350 else:
1349 newheads.append(r)
1351 newheads.append(r)
1350 if len(newheads) > len(remote_heads):
1352 if len(newheads) > len(remote_heads):
1351 warn = 1
1353 warn = 1
1352
1354
1353 if warn:
1355 if warn:
1354 self.ui.warn(_("abort: push creates new remote branches!\n"))
1356 self.ui.warn(_("abort: push creates new remote branches!\n"))
1355 self.ui.status(_("(did you forget to merge?"
1357 self.ui.status(_("(did you forget to merge?"
1356 " use push -f to force)\n"))
1358 " use push -f to force)\n"))
1357 return None, 1
1359 return None, 1
1358 elif inc:
1360 elif inc:
1359 self.ui.warn(_("note: unsynced remote changes!\n"))
1361 self.ui.warn(_("note: unsynced remote changes!\n"))
1360
1362
1361
1363
1362 if revs is None:
1364 if revs is None:
1363 cg = self.changegroup(update, 'push')
1365 cg = self.changegroup(update, 'push')
1364 else:
1366 else:
1365 cg = self.changegroupsubset(update, revs, 'push')
1367 cg = self.changegroupsubset(update, revs, 'push')
1366 return cg, remote_heads
1368 return cg, remote_heads
1367
1369
1368 def push_addchangegroup(self, remote, force, revs):
1370 def push_addchangegroup(self, remote, force, revs):
1369 lock = remote.lock()
1371 lock = remote.lock()
1370
1372
1371 ret = self.prepush(remote, force, revs)
1373 ret = self.prepush(remote, force, revs)
1372 if ret[0] is not None:
1374 if ret[0] is not None:
1373 cg, remote_heads = ret
1375 cg, remote_heads = ret
1374 return remote.addchangegroup(cg, 'push', self.url())
1376 return remote.addchangegroup(cg, 'push', self.url())
1375 return ret[1]
1377 return ret[1]
1376
1378
1377 def push_unbundle(self, remote, force, revs):
1379 def push_unbundle(self, remote, force, revs):
1378 # local repo finds heads on server, finds out what revs it
1380 # local repo finds heads on server, finds out what revs it
1379 # must push. once revs transferred, if server finds it has
1381 # must push. once revs transferred, if server finds it has
1380 # different heads (someone else won commit/push race), server
1382 # different heads (someone else won commit/push race), server
1381 # aborts.
1383 # aborts.
1382
1384
1383 ret = self.prepush(remote, force, revs)
1385 ret = self.prepush(remote, force, revs)
1384 if ret[0] is not None:
1386 if ret[0] is not None:
1385 cg, remote_heads = ret
1387 cg, remote_heads = ret
1386 if force: remote_heads = ['force']
1388 if force: remote_heads = ['force']
1387 return remote.unbundle(cg, remote_heads, 'push')
1389 return remote.unbundle(cg, remote_heads, 'push')
1388 return ret[1]
1390 return ret[1]
1389
1391
1390 def changegroupinfo(self, nodes):
1392 def changegroupinfo(self, nodes):
1391 self.ui.note(_("%d changesets found\n") % len(nodes))
1393 self.ui.note(_("%d changesets found\n") % len(nodes))
1392 if self.ui.debugflag:
1394 if self.ui.debugflag:
1393 self.ui.debug(_("List of changesets:\n"))
1395 self.ui.debug(_("List of changesets:\n"))
1394 for node in nodes:
1396 for node in nodes:
1395 self.ui.debug("%s\n" % hex(node))
1397 self.ui.debug("%s\n" % hex(node))
1396
1398
1397 def changegroupsubset(self, bases, heads, source):
1399 def changegroupsubset(self, bases, heads, source):
1398 """This function generates a changegroup consisting of all the nodes
1400 """This function generates a changegroup consisting of all the nodes
1399 that are descendents of any of the bases, and ancestors of any of
1401 that are descendents of any of the bases, and ancestors of any of
1400 the heads.
1402 the heads.
1401
1403
1402 It is fairly complex as determining which filenodes and which
1404 It is fairly complex as determining which filenodes and which
1403 manifest nodes need to be included for the changeset to be complete
1405 manifest nodes need to be included for the changeset to be complete
1404 is non-trivial.
1406 is non-trivial.
1405
1407
1406 Another wrinkle is doing the reverse, figuring out which changeset in
1408 Another wrinkle is doing the reverse, figuring out which changeset in
1407 the changegroup a particular filenode or manifestnode belongs to."""
1409 the changegroup a particular filenode or manifestnode belongs to."""
1408
1410
1409 self.hook('preoutgoing', throw=True, source=source)
1411 self.hook('preoutgoing', throw=True, source=source)
1410
1412
1411 # Set up some initial variables
1413 # Set up some initial variables
1412 # Make it easy to refer to self.changelog
1414 # Make it easy to refer to self.changelog
1413 cl = self.changelog
1415 cl = self.changelog
1414 # msng is short for missing - compute the list of changesets in this
1416 # msng is short for missing - compute the list of changesets in this
1415 # changegroup.
1417 # changegroup.
1416 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1418 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1417 self.changegroupinfo(msng_cl_lst)
1419 self.changegroupinfo(msng_cl_lst)
1418 # Some bases may turn out to be superfluous, and some heads may be
1420 # Some bases may turn out to be superfluous, and some heads may be
1419 # too. nodesbetween will return the minimal set of bases and heads
1421 # too. nodesbetween will return the minimal set of bases and heads
1420 # necessary to re-create the changegroup.
1422 # necessary to re-create the changegroup.
1421
1423
1422 # Known heads are the list of heads that it is assumed the recipient
1424 # Known heads are the list of heads that it is assumed the recipient
1423 # of this changegroup will know about.
1425 # of this changegroup will know about.
1424 knownheads = {}
1426 knownheads = {}
1425 # We assume that all parents of bases are known heads.
1427 # We assume that all parents of bases are known heads.
1426 for n in bases:
1428 for n in bases:
1427 for p in cl.parents(n):
1429 for p in cl.parents(n):
1428 if p != nullid:
1430 if p != nullid:
1429 knownheads[p] = 1
1431 knownheads[p] = 1
1430 knownheads = knownheads.keys()
1432 knownheads = knownheads.keys()
1431 if knownheads:
1433 if knownheads:
1432 # Now that we know what heads are known, we can compute which
1434 # Now that we know what heads are known, we can compute which
1433 # changesets are known. The recipient must know about all
1435 # changesets are known. The recipient must know about all
1434 # changesets required to reach the known heads from the null
1436 # changesets required to reach the known heads from the null
1435 # changeset.
1437 # changeset.
1436 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1438 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1437 junk = None
1439 junk = None
1438 # Transform the list into an ersatz set.
1440 # Transform the list into an ersatz set.
1439 has_cl_set = dict.fromkeys(has_cl_set)
1441 has_cl_set = dict.fromkeys(has_cl_set)
1440 else:
1442 else:
1441 # If there were no known heads, the recipient cannot be assumed to
1443 # If there were no known heads, the recipient cannot be assumed to
1442 # know about any changesets.
1444 # know about any changesets.
1443 has_cl_set = {}
1445 has_cl_set = {}
1444
1446
1445 # Make it easy to refer to self.manifest
1447 # Make it easy to refer to self.manifest
1446 mnfst = self.manifest
1448 mnfst = self.manifest
1447 # We don't know which manifests are missing yet
1449 # We don't know which manifests are missing yet
1448 msng_mnfst_set = {}
1450 msng_mnfst_set = {}
1449 # Nor do we know which filenodes are missing.
1451 # Nor do we know which filenodes are missing.
1450 msng_filenode_set = {}
1452 msng_filenode_set = {}
1451
1453
1452 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1454 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1453 junk = None
1455 junk = None
1454
1456
1455 # A changeset always belongs to itself, so the changenode lookup
1457 # A changeset always belongs to itself, so the changenode lookup
1456 # function for a changenode is identity.
1458 # function for a changenode is identity.
1457 def identity(x):
1459 def identity(x):
1458 return x
1460 return x
1459
1461
1460 # A function generating function. Sets up an environment for the
1462 # A function generating function. Sets up an environment for the
1461 # inner function.
1463 # inner function.
1462 def cmp_by_rev_func(revlog):
1464 def cmp_by_rev_func(revlog):
1463 # Compare two nodes by their revision number in the environment's
1465 # Compare two nodes by their revision number in the environment's
1464 # revision history. Since the revision number both represents the
1466 # revision history. Since the revision number both represents the
1465 # most efficient order to read the nodes in, and represents a
1467 # most efficient order to read the nodes in, and represents a
1466 # topological sorting of the nodes, this function is often useful.
1468 # topological sorting of the nodes, this function is often useful.
1467 def cmp_by_rev(a, b):
1469 def cmp_by_rev(a, b):
1468 return cmp(revlog.rev(a), revlog.rev(b))
1470 return cmp(revlog.rev(a), revlog.rev(b))
1469 return cmp_by_rev
1471 return cmp_by_rev
1470
1472
1471 # If we determine that a particular file or manifest node must be a
1473 # If we determine that a particular file or manifest node must be a
1472 # node that the recipient of the changegroup will already have, we can
1474 # node that the recipient of the changegroup will already have, we can
1473 # also assume the recipient will have all the parents. This function
1475 # also assume the recipient will have all the parents. This function
1474 # prunes them from the set of missing nodes.
1476 # prunes them from the set of missing nodes.
1475 def prune_parents(revlog, hasset, msngset):
1477 def prune_parents(revlog, hasset, msngset):
1476 haslst = hasset.keys()
1478 haslst = hasset.keys()
1477 haslst.sort(cmp_by_rev_func(revlog))
1479 haslst.sort(cmp_by_rev_func(revlog))
1478 for node in haslst:
1480 for node in haslst:
1479 parentlst = [p for p in revlog.parents(node) if p != nullid]
1481 parentlst = [p for p in revlog.parents(node) if p != nullid]
1480 while parentlst:
1482 while parentlst:
1481 n = parentlst.pop()
1483 n = parentlst.pop()
1482 if n not in hasset:
1484 if n not in hasset:
1483 hasset[n] = 1
1485 hasset[n] = 1
1484 p = [p for p in revlog.parents(n) if p != nullid]
1486 p = [p for p in revlog.parents(n) if p != nullid]
1485 parentlst.extend(p)
1487 parentlst.extend(p)
1486 for n in hasset:
1488 for n in hasset:
1487 msngset.pop(n, None)
1489 msngset.pop(n, None)
1488
1490
1489 # This is a function generating function used to set up an environment
1491 # This is a function generating function used to set up an environment
1490 # for the inner function to execute in.
1492 # for the inner function to execute in.
1491 def manifest_and_file_collector(changedfileset):
1493 def manifest_and_file_collector(changedfileset):
1492 # This is an information gathering function that gathers
1494 # This is an information gathering function that gathers
1493 # information from each changeset node that goes out as part of
1495 # information from each changeset node that goes out as part of
1494 # the changegroup. The information gathered is a list of which
1496 # the changegroup. The information gathered is a list of which
1495 # manifest nodes are potentially required (the recipient may
1497 # manifest nodes are potentially required (the recipient may
1496 # already have them) and total list of all files which were
1498 # already have them) and total list of all files which were
1497 # changed in any changeset in the changegroup.
1499 # changed in any changeset in the changegroup.
1498 #
1500 #
1499 # We also remember the first changenode we saw any manifest
1501 # We also remember the first changenode we saw any manifest
1500 # referenced by so we can later determine which changenode 'owns'
1502 # referenced by so we can later determine which changenode 'owns'
1501 # the manifest.
1503 # the manifest.
1502 def collect_manifests_and_files(clnode):
1504 def collect_manifests_and_files(clnode):
1503 c = cl.read(clnode)
1505 c = cl.read(clnode)
1504 for f in c[3]:
1506 for f in c[3]:
1505 # This is to make sure we only have one instance of each
1507 # This is to make sure we only have one instance of each
1506 # filename string for each filename.
1508 # filename string for each filename.
1507 changedfileset.setdefault(f, f)
1509 changedfileset.setdefault(f, f)
1508 msng_mnfst_set.setdefault(c[0], clnode)
1510 msng_mnfst_set.setdefault(c[0], clnode)
1509 return collect_manifests_and_files
1511 return collect_manifests_and_files
1510
1512
1511 # Figure out which manifest nodes (of the ones we think might be part
1513 # Figure out which manifest nodes (of the ones we think might be part
1512 # of the changegroup) the recipient must know about and remove them
1514 # of the changegroup) the recipient must know about and remove them
1513 # from the changegroup.
1515 # from the changegroup.
1514 def prune_manifests():
1516 def prune_manifests():
1515 has_mnfst_set = {}
1517 has_mnfst_set = {}
1516 for n in msng_mnfst_set:
1518 for n in msng_mnfst_set:
1517 # If a 'missing' manifest thinks it belongs to a changenode
1519 # If a 'missing' manifest thinks it belongs to a changenode
1518 # the recipient is assumed to have, obviously the recipient
1520 # the recipient is assumed to have, obviously the recipient
1519 # must have that manifest.
1521 # must have that manifest.
1520 linknode = cl.node(mnfst.linkrev(n))
1522 linknode = cl.node(mnfst.linkrev(n))
1521 if linknode in has_cl_set:
1523 if linknode in has_cl_set:
1522 has_mnfst_set[n] = 1
1524 has_mnfst_set[n] = 1
1523 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1525 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1524
1526
1525 # Use the information collected in collect_manifests_and_files to say
1527 # Use the information collected in collect_manifests_and_files to say
1526 # which changenode any manifestnode belongs to.
1528 # which changenode any manifestnode belongs to.
1527 def lookup_manifest_link(mnfstnode):
1529 def lookup_manifest_link(mnfstnode):
1528 return msng_mnfst_set[mnfstnode]
1530 return msng_mnfst_set[mnfstnode]
1529
1531
1530 # A function generating function that sets up the initial environment
1532 # A function generating function that sets up the initial environment
1531 # the inner function.
1533 # the inner function.
1532 def filenode_collector(changedfiles):
1534 def filenode_collector(changedfiles):
1533 next_rev = [0]
1535 next_rev = [0]
1534 # This gathers information from each manifestnode included in the
1536 # This gathers information from each manifestnode included in the
1535 # changegroup about which filenodes the manifest node references
1537 # changegroup about which filenodes the manifest node references
1536 # so we can include those in the changegroup too.
1538 # so we can include those in the changegroup too.
1537 #
1539 #
1538 # It also remembers which changenode each filenode belongs to. It
1540 # It also remembers which changenode each filenode belongs to. It
1539 # does this by assuming the a filenode belongs to the changenode
1541 # does this by assuming the a filenode belongs to the changenode
1540 # the first manifest that references it belongs to.
1542 # the first manifest that references it belongs to.
1541 def collect_msng_filenodes(mnfstnode):
1543 def collect_msng_filenodes(mnfstnode):
1542 r = mnfst.rev(mnfstnode)
1544 r = mnfst.rev(mnfstnode)
1543 if r == next_rev[0]:
1545 if r == next_rev[0]:
1544 # If the last rev we looked at was the one just previous,
1546 # If the last rev we looked at was the one just previous,
1545 # we only need to see a diff.
1547 # we only need to see a diff.
1546 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1548 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1547 # For each line in the delta
1549 # For each line in the delta
1548 for dline in delta.splitlines():
1550 for dline in delta.splitlines():
1549 # get the filename and filenode for that line
1551 # get the filename and filenode for that line
1550 f, fnode = dline.split('\0')
1552 f, fnode = dline.split('\0')
1551 fnode = bin(fnode[:40])
1553 fnode = bin(fnode[:40])
1552 f = changedfiles.get(f, None)
1554 f = changedfiles.get(f, None)
1553 # And if the file is in the list of files we care
1555 # And if the file is in the list of files we care
1554 # about.
1556 # about.
1555 if f is not None:
1557 if f is not None:
1556 # Get the changenode this manifest belongs to
1558 # Get the changenode this manifest belongs to
1557 clnode = msng_mnfst_set[mnfstnode]
1559 clnode = msng_mnfst_set[mnfstnode]
1558 # Create the set of filenodes for the file if
1560 # Create the set of filenodes for the file if
1559 # there isn't one already.
1561 # there isn't one already.
1560 ndset = msng_filenode_set.setdefault(f, {})
1562 ndset = msng_filenode_set.setdefault(f, {})
1561 # And set the filenode's changelog node to the
1563 # And set the filenode's changelog node to the
1562 # manifest's if it hasn't been set already.
1564 # manifest's if it hasn't been set already.
1563 ndset.setdefault(fnode, clnode)
1565 ndset.setdefault(fnode, clnode)
1564 else:
1566 else:
1565 # Otherwise we need a full manifest.
1567 # Otherwise we need a full manifest.
1566 m = mnfst.read(mnfstnode)
1568 m = mnfst.read(mnfstnode)
1567 # For every file in we care about.
1569 # For every file in we care about.
1568 for f in changedfiles:
1570 for f in changedfiles:
1569 fnode = m.get(f, None)
1571 fnode = m.get(f, None)
1570 # If it's in the manifest
1572 # If it's in the manifest
1571 if fnode is not None:
1573 if fnode is not None:
1572 # See comments above.
1574 # See comments above.
1573 clnode = msng_mnfst_set[mnfstnode]
1575 clnode = msng_mnfst_set[mnfstnode]
1574 ndset = msng_filenode_set.setdefault(f, {})
1576 ndset = msng_filenode_set.setdefault(f, {})
1575 ndset.setdefault(fnode, clnode)
1577 ndset.setdefault(fnode, clnode)
1576 # Remember the revision we hope to see next.
1578 # Remember the revision we hope to see next.
1577 next_rev[0] = r + 1
1579 next_rev[0] = r + 1
1578 return collect_msng_filenodes
1580 return collect_msng_filenodes
1579
1581
1580 # We have a list of filenodes we think we need for a file, lets remove
1582 # We have a list of filenodes we think we need for a file, lets remove
1581 # all those we now the recipient must have.
1583 # all those we now the recipient must have.
1582 def prune_filenodes(f, filerevlog):
1584 def prune_filenodes(f, filerevlog):
1583 msngset = msng_filenode_set[f]
1585 msngset = msng_filenode_set[f]
1584 hasset = {}
1586 hasset = {}
1585 # If a 'missing' filenode thinks it belongs to a changenode we
1587 # If a 'missing' filenode thinks it belongs to a changenode we
1586 # assume the recipient must have, then the recipient must have
1588 # assume the recipient must have, then the recipient must have
1587 # that filenode.
1589 # that filenode.
1588 for n in msngset:
1590 for n in msngset:
1589 clnode = cl.node(filerevlog.linkrev(n))
1591 clnode = cl.node(filerevlog.linkrev(n))
1590 if clnode in has_cl_set:
1592 if clnode in has_cl_set:
1591 hasset[n] = 1
1593 hasset[n] = 1
1592 prune_parents(filerevlog, hasset, msngset)
1594 prune_parents(filerevlog, hasset, msngset)
1593
1595
1594 # A function generator function that sets up the a context for the
1596 # A function generator function that sets up the a context for the
1595 # inner function.
1597 # inner function.
1596 def lookup_filenode_link_func(fname):
1598 def lookup_filenode_link_func(fname):
1597 msngset = msng_filenode_set[fname]
1599 msngset = msng_filenode_set[fname]
1598 # Lookup the changenode the filenode belongs to.
1600 # Lookup the changenode the filenode belongs to.
1599 def lookup_filenode_link(fnode):
1601 def lookup_filenode_link(fnode):
1600 return msngset[fnode]
1602 return msngset[fnode]
1601 return lookup_filenode_link
1603 return lookup_filenode_link
1602
1604
1603 # Now that we have all theses utility functions to help out and
1605 # Now that we have all theses utility functions to help out and
1604 # logically divide up the task, generate the group.
1606 # logically divide up the task, generate the group.
1605 def gengroup():
1607 def gengroup():
1606 # The set of changed files starts empty.
1608 # The set of changed files starts empty.
1607 changedfiles = {}
1609 changedfiles = {}
1608 # Create a changenode group generator that will call our functions
1610 # Create a changenode group generator that will call our functions
1609 # back to lookup the owning changenode and collect information.
1611 # back to lookup the owning changenode and collect information.
1610 group = cl.group(msng_cl_lst, identity,
1612 group = cl.group(msng_cl_lst, identity,
1611 manifest_and_file_collector(changedfiles))
1613 manifest_and_file_collector(changedfiles))
1612 for chnk in group:
1614 for chnk in group:
1613 yield chnk
1615 yield chnk
1614
1616
1615 # The list of manifests has been collected by the generator
1617 # The list of manifests has been collected by the generator
1616 # calling our functions back.
1618 # calling our functions back.
1617 prune_manifests()
1619 prune_manifests()
1618 msng_mnfst_lst = msng_mnfst_set.keys()
1620 msng_mnfst_lst = msng_mnfst_set.keys()
1619 # Sort the manifestnodes by revision number.
1621 # Sort the manifestnodes by revision number.
1620 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1622 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1621 # Create a generator for the manifestnodes that calls our lookup
1623 # Create a generator for the manifestnodes that calls our lookup
1622 # and data collection functions back.
1624 # and data collection functions back.
1623 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1625 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1624 filenode_collector(changedfiles))
1626 filenode_collector(changedfiles))
1625 for chnk in group:
1627 for chnk in group:
1626 yield chnk
1628 yield chnk
1627
1629
1628 # These are no longer needed, dereference and toss the memory for
1630 # These are no longer needed, dereference and toss the memory for
1629 # them.
1631 # them.
1630 msng_mnfst_lst = None
1632 msng_mnfst_lst = None
1631 msng_mnfst_set.clear()
1633 msng_mnfst_set.clear()
1632
1634
1633 changedfiles = changedfiles.keys()
1635 changedfiles = changedfiles.keys()
1634 changedfiles.sort()
1636 changedfiles.sort()
1635 # Go through all our files in order sorted by name.
1637 # Go through all our files in order sorted by name.
1636 for fname in changedfiles:
1638 for fname in changedfiles:
1637 filerevlog = self.file(fname)
1639 filerevlog = self.file(fname)
1638 # Toss out the filenodes that the recipient isn't really
1640 # Toss out the filenodes that the recipient isn't really
1639 # missing.
1641 # missing.
1640 if msng_filenode_set.has_key(fname):
1642 if msng_filenode_set.has_key(fname):
1641 prune_filenodes(fname, filerevlog)
1643 prune_filenodes(fname, filerevlog)
1642 msng_filenode_lst = msng_filenode_set[fname].keys()
1644 msng_filenode_lst = msng_filenode_set[fname].keys()
1643 else:
1645 else:
1644 msng_filenode_lst = []
1646 msng_filenode_lst = []
1645 # If any filenodes are left, generate the group for them,
1647 # If any filenodes are left, generate the group for them,
1646 # otherwise don't bother.
1648 # otherwise don't bother.
1647 if len(msng_filenode_lst) > 0:
1649 if len(msng_filenode_lst) > 0:
1648 yield changegroup.genchunk(fname)
1650 yield changegroup.genchunk(fname)
1649 # Sort the filenodes by their revision #
1651 # Sort the filenodes by their revision #
1650 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1652 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1651 # Create a group generator and only pass in a changenode
1653 # Create a group generator and only pass in a changenode
1652 # lookup function as we need to collect no information
1654 # lookup function as we need to collect no information
1653 # from filenodes.
1655 # from filenodes.
1654 group = filerevlog.group(msng_filenode_lst,
1656 group = filerevlog.group(msng_filenode_lst,
1655 lookup_filenode_link_func(fname))
1657 lookup_filenode_link_func(fname))
1656 for chnk in group:
1658 for chnk in group:
1657 yield chnk
1659 yield chnk
1658 if msng_filenode_set.has_key(fname):
1660 if msng_filenode_set.has_key(fname):
1659 # Don't need this anymore, toss it to free memory.
1661 # Don't need this anymore, toss it to free memory.
1660 del msng_filenode_set[fname]
1662 del msng_filenode_set[fname]
1661 # Signal that no more groups are left.
1663 # Signal that no more groups are left.
1662 yield changegroup.closechunk()
1664 yield changegroup.closechunk()
1663
1665
1664 if msng_cl_lst:
1666 if msng_cl_lst:
1665 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1667 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1666
1668
1667 return util.chunkbuffer(gengroup())
1669 return util.chunkbuffer(gengroup())
1668
1670
1669 def changegroup(self, basenodes, source):
1671 def changegroup(self, basenodes, source):
1670 """Generate a changegroup of all nodes that we have that a recipient
1672 """Generate a changegroup of all nodes that we have that a recipient
1671 doesn't.
1673 doesn't.
1672
1674
1673 This is much easier than the previous function as we can assume that
1675 This is much easier than the previous function as we can assume that
1674 the recipient has any changenode we aren't sending them."""
1676 the recipient has any changenode we aren't sending them."""
1675
1677
1676 self.hook('preoutgoing', throw=True, source=source)
1678 self.hook('preoutgoing', throw=True, source=source)
1677
1679
1678 cl = self.changelog
1680 cl = self.changelog
1679 nodes = cl.nodesbetween(basenodes, None)[0]
1681 nodes = cl.nodesbetween(basenodes, None)[0]
1680 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1682 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1681 self.changegroupinfo(nodes)
1683 self.changegroupinfo(nodes)
1682
1684
1683 def identity(x):
1685 def identity(x):
1684 return x
1686 return x
1685
1687
1686 def gennodelst(revlog):
1688 def gennodelst(revlog):
1687 for r in xrange(0, revlog.count()):
1689 for r in xrange(0, revlog.count()):
1688 n = revlog.node(r)
1690 n = revlog.node(r)
1689 if revlog.linkrev(n) in revset:
1691 if revlog.linkrev(n) in revset:
1690 yield n
1692 yield n
1691
1693
1692 def changed_file_collector(changedfileset):
1694 def changed_file_collector(changedfileset):
1693 def collect_changed_files(clnode):
1695 def collect_changed_files(clnode):
1694 c = cl.read(clnode)
1696 c = cl.read(clnode)
1695 for fname in c[3]:
1697 for fname in c[3]:
1696 changedfileset[fname] = 1
1698 changedfileset[fname] = 1
1697 return collect_changed_files
1699 return collect_changed_files
1698
1700
1699 def lookuprevlink_func(revlog):
1701 def lookuprevlink_func(revlog):
1700 def lookuprevlink(n):
1702 def lookuprevlink(n):
1701 return cl.node(revlog.linkrev(n))
1703 return cl.node(revlog.linkrev(n))
1702 return lookuprevlink
1704 return lookuprevlink
1703
1705
1704 def gengroup():
1706 def gengroup():
1705 # construct a list of all changed files
1707 # construct a list of all changed files
1706 changedfiles = {}
1708 changedfiles = {}
1707
1709
1708 for chnk in cl.group(nodes, identity,
1710 for chnk in cl.group(nodes, identity,
1709 changed_file_collector(changedfiles)):
1711 changed_file_collector(changedfiles)):
1710 yield chnk
1712 yield chnk
1711 changedfiles = changedfiles.keys()
1713 changedfiles = changedfiles.keys()
1712 changedfiles.sort()
1714 changedfiles.sort()
1713
1715
1714 mnfst = self.manifest
1716 mnfst = self.manifest
1715 nodeiter = gennodelst(mnfst)
1717 nodeiter = gennodelst(mnfst)
1716 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1718 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1717 yield chnk
1719 yield chnk
1718
1720
1719 for fname in changedfiles:
1721 for fname in changedfiles:
1720 filerevlog = self.file(fname)
1722 filerevlog = self.file(fname)
1721 nodeiter = gennodelst(filerevlog)
1723 nodeiter = gennodelst(filerevlog)
1722 nodeiter = list(nodeiter)
1724 nodeiter = list(nodeiter)
1723 if nodeiter:
1725 if nodeiter:
1724 yield changegroup.genchunk(fname)
1726 yield changegroup.genchunk(fname)
1725 lookup = lookuprevlink_func(filerevlog)
1727 lookup = lookuprevlink_func(filerevlog)
1726 for chnk in filerevlog.group(nodeiter, lookup):
1728 for chnk in filerevlog.group(nodeiter, lookup):
1727 yield chnk
1729 yield chnk
1728
1730
1729 yield changegroup.closechunk()
1731 yield changegroup.closechunk()
1730
1732
1731 if nodes:
1733 if nodes:
1732 self.hook('outgoing', node=hex(nodes[0]), source=source)
1734 self.hook('outgoing', node=hex(nodes[0]), source=source)
1733
1735
1734 return util.chunkbuffer(gengroup())
1736 return util.chunkbuffer(gengroup())
1735
1737
1736 def addchangegroup(self, source, srctype, url):
1738 def addchangegroup(self, source, srctype, url):
1737 """add changegroup to repo.
1739 """add changegroup to repo.
1738 returns number of heads modified or added + 1."""
1740 returns number of heads modified or added + 1."""
1739
1741
1740 def csmap(x):
1742 def csmap(x):
1741 self.ui.debug(_("add changeset %s\n") % short(x))
1743 self.ui.debug(_("add changeset %s\n") % short(x))
1742 return cl.count()
1744 return cl.count()
1743
1745
1744 def revmap(x):
1746 def revmap(x):
1745 return cl.rev(x)
1747 return cl.rev(x)
1746
1748
1747 if not source:
1749 if not source:
1748 return 0
1750 return 0
1749
1751
1750 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1752 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1751
1753
1752 changesets = files = revisions = 0
1754 changesets = files = revisions = 0
1753
1755
1754 tr = self.transaction()
1756 tr = self.transaction()
1755
1757
1756 # write changelog data to temp files so concurrent readers will not see
1758 # write changelog data to temp files so concurrent readers will not see
1757 # inconsistent view
1759 # inconsistent view
1758 cl = None
1760 cl = None
1759 try:
1761 try:
1760 cl = appendfile.appendchangelog(self.sopener,
1762 cl = appendfile.appendchangelog(self.sopener,
1761 self.changelog.version)
1763 self.changelog.version)
1762
1764
1763 oldheads = len(cl.heads())
1765 oldheads = len(cl.heads())
1764
1766
1765 # pull off the changeset group
1767 # pull off the changeset group
1766 self.ui.status(_("adding changesets\n"))
1768 self.ui.status(_("adding changesets\n"))
1767 cor = cl.count() - 1
1769 cor = cl.count() - 1
1768 chunkiter = changegroup.chunkiter(source)
1770 chunkiter = changegroup.chunkiter(source)
1769 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1771 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1770 raise util.Abort(_("received changelog group is empty"))
1772 raise util.Abort(_("received changelog group is empty"))
1771 cnr = cl.count() - 1
1773 cnr = cl.count() - 1
1772 changesets = cnr - cor
1774 changesets = cnr - cor
1773
1775
1774 # pull off the manifest group
1776 # pull off the manifest group
1775 self.ui.status(_("adding manifests\n"))
1777 self.ui.status(_("adding manifests\n"))
1776 chunkiter = changegroup.chunkiter(source)
1778 chunkiter = changegroup.chunkiter(source)
1777 # no need to check for empty manifest group here:
1779 # no need to check for empty manifest group here:
1778 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1780 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1779 # no new manifest will be created and the manifest group will
1781 # no new manifest will be created and the manifest group will
1780 # be empty during the pull
1782 # be empty during the pull
1781 self.manifest.addgroup(chunkiter, revmap, tr)
1783 self.manifest.addgroup(chunkiter, revmap, tr)
1782
1784
1783 # process the files
1785 # process the files
1784 self.ui.status(_("adding file changes\n"))
1786 self.ui.status(_("adding file changes\n"))
1785 while 1:
1787 while 1:
1786 f = changegroup.getchunk(source)
1788 f = changegroup.getchunk(source)
1787 if not f:
1789 if not f:
1788 break
1790 break
1789 self.ui.debug(_("adding %s revisions\n") % f)
1791 self.ui.debug(_("adding %s revisions\n") % f)
1790 fl = self.file(f)
1792 fl = self.file(f)
1791 o = fl.count()
1793 o = fl.count()
1792 chunkiter = changegroup.chunkiter(source)
1794 chunkiter = changegroup.chunkiter(source)
1793 if fl.addgroup(chunkiter, revmap, tr) is None:
1795 if fl.addgroup(chunkiter, revmap, tr) is None:
1794 raise util.Abort(_("received file revlog group is empty"))
1796 raise util.Abort(_("received file revlog group is empty"))
1795 revisions += fl.count() - o
1797 revisions += fl.count() - o
1796 files += 1
1798 files += 1
1797
1799
1798 cl.writedata()
1800 cl.writedata()
1799 finally:
1801 finally:
1800 if cl:
1802 if cl:
1801 cl.cleanup()
1803 cl.cleanup()
1802
1804
1803 # make changelog see real files again
1805 # make changelog see real files again
1804 self.changelog = changelog.changelog(self.sopener,
1806 self.changelog = changelog.changelog(self.sopener,
1805 self.changelog.version)
1807 self.changelog.version)
1806 self.changelog.checkinlinesize(tr)
1808 self.changelog.checkinlinesize(tr)
1807
1809
1808 newheads = len(self.changelog.heads())
1810 newheads = len(self.changelog.heads())
1809 heads = ""
1811 heads = ""
1810 if oldheads and newheads != oldheads:
1812 if oldheads and newheads != oldheads:
1811 heads = _(" (%+d heads)") % (newheads - oldheads)
1813 heads = _(" (%+d heads)") % (newheads - oldheads)
1812
1814
1813 self.ui.status(_("added %d changesets"
1815 self.ui.status(_("added %d changesets"
1814 " with %d changes to %d files%s\n")
1816 " with %d changes to %d files%s\n")
1815 % (changesets, revisions, files, heads))
1817 % (changesets, revisions, files, heads))
1816
1818
1817 if changesets > 0:
1819 if changesets > 0:
1818 self.hook('pretxnchangegroup', throw=True,
1820 self.hook('pretxnchangegroup', throw=True,
1819 node=hex(self.changelog.node(cor+1)), source=srctype,
1821 node=hex(self.changelog.node(cor+1)), source=srctype,
1820 url=url)
1822 url=url)
1821
1823
1822 tr.close()
1824 tr.close()
1823
1825
1824 if changesets > 0:
1826 if changesets > 0:
1825 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1827 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1826 source=srctype, url=url)
1828 source=srctype, url=url)
1827
1829
1828 for i in xrange(cor + 1, cnr + 1):
1830 for i in xrange(cor + 1, cnr + 1):
1829 self.hook("incoming", node=hex(self.changelog.node(i)),
1831 self.hook("incoming", node=hex(self.changelog.node(i)),
1830 source=srctype, url=url)
1832 source=srctype, url=url)
1831
1833
1832 return newheads - oldheads + 1
1834 return newheads - oldheads + 1
1833
1835
1834
1836
1835 def stream_in(self, remote):
1837 def stream_in(self, remote):
1836 fp = remote.stream_out()
1838 fp = remote.stream_out()
1837 l = fp.readline()
1839 l = fp.readline()
1838 try:
1840 try:
1839 resp = int(l)
1841 resp = int(l)
1840 except ValueError:
1842 except ValueError:
1841 raise util.UnexpectedOutput(
1843 raise util.UnexpectedOutput(
1842 _('Unexpected response from remote server:'), l)
1844 _('Unexpected response from remote server:'), l)
1843 if resp == 1:
1845 if resp == 1:
1844 raise util.Abort(_('operation forbidden by server'))
1846 raise util.Abort(_('operation forbidden by server'))
1845 elif resp == 2:
1847 elif resp == 2:
1846 raise util.Abort(_('locking the remote repository failed'))
1848 raise util.Abort(_('locking the remote repository failed'))
1847 elif resp != 0:
1849 elif resp != 0:
1848 raise util.Abort(_('the server sent an unknown error code'))
1850 raise util.Abort(_('the server sent an unknown error code'))
1849 self.ui.status(_('streaming all changes\n'))
1851 self.ui.status(_('streaming all changes\n'))
1850 l = fp.readline()
1852 l = fp.readline()
1851 try:
1853 try:
1852 total_files, total_bytes = map(int, l.split(' ', 1))
1854 total_files, total_bytes = map(int, l.split(' ', 1))
1853 except ValueError, TypeError:
1855 except ValueError, TypeError:
1854 raise util.UnexpectedOutput(
1856 raise util.UnexpectedOutput(
1855 _('Unexpected response from remote server:'), l)
1857 _('Unexpected response from remote server:'), l)
1856 self.ui.status(_('%d files to transfer, %s of data\n') %
1858 self.ui.status(_('%d files to transfer, %s of data\n') %
1857 (total_files, util.bytecount(total_bytes)))
1859 (total_files, util.bytecount(total_bytes)))
1858 start = time.time()
1860 start = time.time()
1859 for i in xrange(total_files):
1861 for i in xrange(total_files):
1860 # XXX doesn't support '\n' or '\r' in filenames
1862 # XXX doesn't support '\n' or '\r' in filenames
1861 l = fp.readline()
1863 l = fp.readline()
1862 try:
1864 try:
1863 name, size = l.split('\0', 1)
1865 name, size = l.split('\0', 1)
1864 size = int(size)
1866 size = int(size)
1865 except ValueError, TypeError:
1867 except ValueError, TypeError:
1866 raise util.UnexpectedOutput(
1868 raise util.UnexpectedOutput(
1867 _('Unexpected response from remote server:'), l)
1869 _('Unexpected response from remote server:'), l)
1868 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1870 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1869 ofp = self.sopener(name, 'w')
1871 ofp = self.sopener(name, 'w')
1870 for chunk in util.filechunkiter(fp, limit=size):
1872 for chunk in util.filechunkiter(fp, limit=size):
1871 ofp.write(chunk)
1873 ofp.write(chunk)
1872 ofp.close()
1874 ofp.close()
1873 elapsed = time.time() - start
1875 elapsed = time.time() - start
1874 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1876 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1875 (util.bytecount(total_bytes), elapsed,
1877 (util.bytecount(total_bytes), elapsed,
1876 util.bytecount(total_bytes / elapsed)))
1878 util.bytecount(total_bytes / elapsed)))
1877 self.reload()
1879 self.reload()
1878 return len(self.heads()) + 1
1880 return len(self.heads()) + 1
1879
1881
1880 def clone(self, remote, heads=[], stream=False):
1882 def clone(self, remote, heads=[], stream=False):
1881 '''clone remote repository.
1883 '''clone remote repository.
1882
1884
1883 keyword arguments:
1885 keyword arguments:
1884 heads: list of revs to clone (forces use of pull)
1886 heads: list of revs to clone (forces use of pull)
1885 stream: use streaming clone if possible'''
1887 stream: use streaming clone if possible'''
1886
1888
1887 # now, all clients that can request uncompressed clones can
1889 # now, all clients that can request uncompressed clones can
1888 # read repo formats supported by all servers that can serve
1890 # read repo formats supported by all servers that can serve
1889 # them.
1891 # them.
1890
1892
1891 # if revlog format changes, client will have to check version
1893 # if revlog format changes, client will have to check version
1892 # and format flags on "stream" capability, and use
1894 # and format flags on "stream" capability, and use
1893 # uncompressed only if compatible.
1895 # uncompressed only if compatible.
1894
1896
1895 if stream and not heads and remote.capable('stream'):
1897 if stream and not heads and remote.capable('stream'):
1896 return self.stream_in(remote)
1898 return self.stream_in(remote)
1897 return self.pull(remote, heads)
1899 return self.pull(remote, heads)
1898
1900
1899 # used to avoid circular references so destructors work
1901 # used to avoid circular references so destructors work
1900 def aftertrans(base):
1902 def aftertrans(files):
1901 p = base
1903 renamefiles = [tuple(t) for t in files]
1902 def a():
1904 def a():
1903 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1905 for src, dest in renamefiles:
1904 util.rename(os.path.join(p, "journal.dirstate"),
1906 util.rename(src, dest)
1905 os.path.join(p, "undo.dirstate"))
1906 return a
1907 return a
1907
1908
1908 def instance(ui, path, create):
1909 def instance(ui, path, create):
1909 return localrepository(ui, util.drop_scheme('file', path), create)
1910 return localrepository(ui, util.drop_scheme('file', path), create)
1910
1911
1911 def islocal(path):
1912 def islocal(path):
1912 return True
1913 return True
General Comments 0
You need to be logged in to leave comments. Login now