##// END OF EJS Templates
move code around
Benoit Boissinot -
r3850:a4457828 default
parent child Browse files
Show More
@@ -1,1927 +1,1929 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33
33 self.path = os.path.join(path, ".hg")
34 self.path = os.path.join(path, ".hg")
34 self.spath = self.path
35 self.root = os.path.realpath(path)
36 self.origroot = path
37 self.opener = util.opener(self.path)
38 self.wopener = util.opener(self.root)
35
39
36 if not os.path.isdir(self.path):
40 if not os.path.isdir(self.path):
37 if create:
41 if create:
38 if not os.path.exists(path):
42 if not os.path.exists(path):
39 os.mkdir(path)
43 os.mkdir(path)
40 os.mkdir(self.path)
44 os.mkdir(self.path)
41 if self.spath != self.path:
45 #if self.spath != self.path:
42 os.mkdir(self.spath)
46 # os.mkdir(self.spath)
43 else:
47 else:
44 raise repo.RepoError(_("repository %s not found") % path)
48 raise repo.RepoError(_("repository %s not found") % path)
45 elif create:
49 elif create:
46 raise repo.RepoError(_("repository %s already exists") % path)
50 raise repo.RepoError(_("repository %s already exists") % path)
47
51
48 self.root = os.path.realpath(path)
52 # setup store
49 self.origroot = path
53 self.spath = self.path
54 self.sopener = util.opener(self.spath)
55
50 self.ui = ui.ui(parentui=parentui)
56 self.ui = ui.ui(parentui=parentui)
51 self.opener = util.opener(self.path)
52 self.sopener = util.opener(self.spath)
53 self.wopener = util.opener(self.root)
54
55 try:
57 try:
56 self.ui.readconfig(self.join("hgrc"), self.root)
58 self.ui.readconfig(self.join("hgrc"), self.root)
57 except IOError:
59 except IOError:
58 pass
60 pass
59
61
60 v = self.ui.configrevlog()
62 v = self.ui.configrevlog()
61 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
63 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
62 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
64 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
63 fl = v.get('flags', None)
65 fl = v.get('flags', None)
64 flags = 0
66 flags = 0
65 if fl != None:
67 if fl != None:
66 for x in fl.split():
68 for x in fl.split():
67 flags |= revlog.flagstr(x)
69 flags |= revlog.flagstr(x)
68 elif self.revlogv1:
70 elif self.revlogv1:
69 flags = revlog.REVLOG_DEFAULT_FLAGS
71 flags = revlog.REVLOG_DEFAULT_FLAGS
70
72
71 v = self.revlogversion | flags
73 v = self.revlogversion | flags
72 self.manifest = manifest.manifest(self.sopener, v)
74 self.manifest = manifest.manifest(self.sopener, v)
73 self.changelog = changelog.changelog(self.sopener, v)
75 self.changelog = changelog.changelog(self.sopener, v)
74
76
75 # the changelog might not have the inline index flag
77 # the changelog might not have the inline index flag
76 # on. If the format of the changelog is the same as found in
78 # on. If the format of the changelog is the same as found in
77 # .hgrc, apply any flags found in the .hgrc as well.
79 # .hgrc, apply any flags found in the .hgrc as well.
78 # Otherwise, just version from the changelog
80 # Otherwise, just version from the changelog
79 v = self.changelog.version
81 v = self.changelog.version
80 if v == self.revlogversion:
82 if v == self.revlogversion:
81 v |= flags
83 v |= flags
82 self.revlogversion = v
84 self.revlogversion = v
83
85
84 self.tagscache = None
86 self.tagscache = None
85 self.branchcache = None
87 self.branchcache = None
86 self.nodetagscache = None
88 self.nodetagscache = None
87 self.encodepats = None
89 self.encodepats = None
88 self.decodepats = None
90 self.decodepats = None
89 self.transhandle = None
91 self.transhandle = None
90
92
91 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
93 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
92
94
93 def url(self):
95 def url(self):
94 return 'file:' + self.root
96 return 'file:' + self.root
95
97
96 def hook(self, name, throw=False, **args):
98 def hook(self, name, throw=False, **args):
97 def callhook(hname, funcname):
99 def callhook(hname, funcname):
98 '''call python hook. hook is callable object, looked up as
100 '''call python hook. hook is callable object, looked up as
99 name in python module. if callable returns "true", hook
101 name in python module. if callable returns "true", hook
100 fails, else passes. if hook raises exception, treated as
102 fails, else passes. if hook raises exception, treated as
101 hook failure. exception propagates if throw is "true".
103 hook failure. exception propagates if throw is "true".
102
104
103 reason for "true" meaning "hook failed" is so that
105 reason for "true" meaning "hook failed" is so that
104 unmodified commands (e.g. mercurial.commands.update) can
106 unmodified commands (e.g. mercurial.commands.update) can
105 be run as hooks without wrappers to convert return values.'''
107 be run as hooks without wrappers to convert return values.'''
106
108
107 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
109 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
108 d = funcname.rfind('.')
110 d = funcname.rfind('.')
109 if d == -1:
111 if d == -1:
110 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
112 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
111 % (hname, funcname))
113 % (hname, funcname))
112 modname = funcname[:d]
114 modname = funcname[:d]
113 try:
115 try:
114 obj = __import__(modname)
116 obj = __import__(modname)
115 except ImportError:
117 except ImportError:
116 try:
118 try:
117 # extensions are loaded with hgext_ prefix
119 # extensions are loaded with hgext_ prefix
118 obj = __import__("hgext_%s" % modname)
120 obj = __import__("hgext_%s" % modname)
119 except ImportError:
121 except ImportError:
120 raise util.Abort(_('%s hook is invalid '
122 raise util.Abort(_('%s hook is invalid '
121 '(import of "%s" failed)') %
123 '(import of "%s" failed)') %
122 (hname, modname))
124 (hname, modname))
123 try:
125 try:
124 for p in funcname.split('.')[1:]:
126 for p in funcname.split('.')[1:]:
125 obj = getattr(obj, p)
127 obj = getattr(obj, p)
126 except AttributeError, err:
128 except AttributeError, err:
127 raise util.Abort(_('%s hook is invalid '
129 raise util.Abort(_('%s hook is invalid '
128 '("%s" is not defined)') %
130 '("%s" is not defined)') %
129 (hname, funcname))
131 (hname, funcname))
130 if not callable(obj):
132 if not callable(obj):
131 raise util.Abort(_('%s hook is invalid '
133 raise util.Abort(_('%s hook is invalid '
132 '("%s" is not callable)') %
134 '("%s" is not callable)') %
133 (hname, funcname))
135 (hname, funcname))
134 try:
136 try:
135 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
137 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
136 except (KeyboardInterrupt, util.SignalInterrupt):
138 except (KeyboardInterrupt, util.SignalInterrupt):
137 raise
139 raise
138 except Exception, exc:
140 except Exception, exc:
139 if isinstance(exc, util.Abort):
141 if isinstance(exc, util.Abort):
140 self.ui.warn(_('error: %s hook failed: %s\n') %
142 self.ui.warn(_('error: %s hook failed: %s\n') %
141 (hname, exc.args[0]))
143 (hname, exc.args[0]))
142 else:
144 else:
143 self.ui.warn(_('error: %s hook raised an exception: '
145 self.ui.warn(_('error: %s hook raised an exception: '
144 '%s\n') % (hname, exc))
146 '%s\n') % (hname, exc))
145 if throw:
147 if throw:
146 raise
148 raise
147 self.ui.print_exc()
149 self.ui.print_exc()
148 return True
150 return True
149 if r:
151 if r:
150 if throw:
152 if throw:
151 raise util.Abort(_('%s hook failed') % hname)
153 raise util.Abort(_('%s hook failed') % hname)
152 self.ui.warn(_('warning: %s hook failed\n') % hname)
154 self.ui.warn(_('warning: %s hook failed\n') % hname)
153 return r
155 return r
154
156
155 def runhook(name, cmd):
157 def runhook(name, cmd):
156 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
158 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
157 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
159 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
158 r = util.system(cmd, environ=env, cwd=self.root)
160 r = util.system(cmd, environ=env, cwd=self.root)
159 if r:
161 if r:
160 desc, r = util.explain_exit(r)
162 desc, r = util.explain_exit(r)
161 if throw:
163 if throw:
162 raise util.Abort(_('%s hook %s') % (name, desc))
164 raise util.Abort(_('%s hook %s') % (name, desc))
163 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
165 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
164 return r
166 return r
165
167
166 r = False
168 r = False
167 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
169 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
168 if hname.split(".", 1)[0] == name and cmd]
170 if hname.split(".", 1)[0] == name and cmd]
169 hooks.sort()
171 hooks.sort()
170 for hname, cmd in hooks:
172 for hname, cmd in hooks:
171 if cmd.startswith('python:'):
173 if cmd.startswith('python:'):
172 r = callhook(hname, cmd[7:].strip()) or r
174 r = callhook(hname, cmd[7:].strip()) or r
173 else:
175 else:
174 r = runhook(hname, cmd) or r
176 r = runhook(hname, cmd) or r
175 return r
177 return r
176
178
177 tag_disallowed = ':\r\n'
179 tag_disallowed = ':\r\n'
178
180
179 def tag(self, name, node, message, local, user, date):
181 def tag(self, name, node, message, local, user, date):
180 '''tag a revision with a symbolic name.
182 '''tag a revision with a symbolic name.
181
183
182 if local is True, the tag is stored in a per-repository file.
184 if local is True, the tag is stored in a per-repository file.
183 otherwise, it is stored in the .hgtags file, and a new
185 otherwise, it is stored in the .hgtags file, and a new
184 changeset is committed with the change.
186 changeset is committed with the change.
185
187
186 keyword arguments:
188 keyword arguments:
187
189
188 local: whether to store tag in non-version-controlled file
190 local: whether to store tag in non-version-controlled file
189 (default False)
191 (default False)
190
192
191 message: commit message to use if committing
193 message: commit message to use if committing
192
194
193 user: name of user to use if committing
195 user: name of user to use if committing
194
196
195 date: date tuple to use if committing'''
197 date: date tuple to use if committing'''
196
198
197 for c in self.tag_disallowed:
199 for c in self.tag_disallowed:
198 if c in name:
200 if c in name:
199 raise util.Abort(_('%r cannot be used in a tag name') % c)
201 raise util.Abort(_('%r cannot be used in a tag name') % c)
200
202
201 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
203 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
202
204
203 if local:
205 if local:
204 # local tags are stored in the current charset
206 # local tags are stored in the current charset
205 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
207 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
206 self.hook('tag', node=hex(node), tag=name, local=local)
208 self.hook('tag', node=hex(node), tag=name, local=local)
207 return
209 return
208
210
209 for x in self.status()[:5]:
211 for x in self.status()[:5]:
210 if '.hgtags' in x:
212 if '.hgtags' in x:
211 raise util.Abort(_('working copy of .hgtags is changed '
213 raise util.Abort(_('working copy of .hgtags is changed '
212 '(please commit .hgtags manually)'))
214 '(please commit .hgtags manually)'))
213
215
214 # committed tags are stored in UTF-8
216 # committed tags are stored in UTF-8
215 line = '%s %s\n' % (hex(node), util.fromlocal(name))
217 line = '%s %s\n' % (hex(node), util.fromlocal(name))
216 self.wfile('.hgtags', 'ab').write(line)
218 self.wfile('.hgtags', 'ab').write(line)
217 if self.dirstate.state('.hgtags') == '?':
219 if self.dirstate.state('.hgtags') == '?':
218 self.add(['.hgtags'])
220 self.add(['.hgtags'])
219
221
220 self.commit(['.hgtags'], message, user, date)
222 self.commit(['.hgtags'], message, user, date)
221 self.hook('tag', node=hex(node), tag=name, local=local)
223 self.hook('tag', node=hex(node), tag=name, local=local)
222
224
223 def tags(self):
225 def tags(self):
224 '''return a mapping of tag to node'''
226 '''return a mapping of tag to node'''
225 if not self.tagscache:
227 if not self.tagscache:
226 self.tagscache = {}
228 self.tagscache = {}
227
229
228 def parsetag(line, context):
230 def parsetag(line, context):
229 if not line:
231 if not line:
230 return
232 return
231 s = l.split(" ", 1)
233 s = l.split(" ", 1)
232 if len(s) != 2:
234 if len(s) != 2:
233 self.ui.warn(_("%s: cannot parse entry\n") % context)
235 self.ui.warn(_("%s: cannot parse entry\n") % context)
234 return
236 return
235 node, key = s
237 node, key = s
236 key = util.tolocal(key.strip()) # stored in UTF-8
238 key = util.tolocal(key.strip()) # stored in UTF-8
237 try:
239 try:
238 bin_n = bin(node)
240 bin_n = bin(node)
239 except TypeError:
241 except TypeError:
240 self.ui.warn(_("%s: node '%s' is not well formed\n") %
242 self.ui.warn(_("%s: node '%s' is not well formed\n") %
241 (context, node))
243 (context, node))
242 return
244 return
243 if bin_n not in self.changelog.nodemap:
245 if bin_n not in self.changelog.nodemap:
244 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
246 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
245 (context, key))
247 (context, key))
246 return
248 return
247 self.tagscache[key] = bin_n
249 self.tagscache[key] = bin_n
248
250
249 # read the tags file from each head, ending with the tip,
251 # read the tags file from each head, ending with the tip,
250 # and add each tag found to the map, with "newer" ones
252 # and add each tag found to the map, with "newer" ones
251 # taking precedence
253 # taking precedence
252 f = None
254 f = None
253 for rev, node, fnode in self._hgtagsnodes():
255 for rev, node, fnode in self._hgtagsnodes():
254 f = (f and f.filectx(fnode) or
256 f = (f and f.filectx(fnode) or
255 self.filectx('.hgtags', fileid=fnode))
257 self.filectx('.hgtags', fileid=fnode))
256 count = 0
258 count = 0
257 for l in f.data().splitlines():
259 for l in f.data().splitlines():
258 count += 1
260 count += 1
259 parsetag(l, _("%s, line %d") % (str(f), count))
261 parsetag(l, _("%s, line %d") % (str(f), count))
260
262
261 try:
263 try:
262 f = self.opener("localtags")
264 f = self.opener("localtags")
263 count = 0
265 count = 0
264 for l in f:
266 for l in f:
265 # localtags are stored in the local character set
267 # localtags are stored in the local character set
266 # while the internal tag table is stored in UTF-8
268 # while the internal tag table is stored in UTF-8
267 l = util.fromlocal(l)
269 l = util.fromlocal(l)
268 count += 1
270 count += 1
269 parsetag(l, _("localtags, line %d") % count)
271 parsetag(l, _("localtags, line %d") % count)
270 except IOError:
272 except IOError:
271 pass
273 pass
272
274
273 self.tagscache['tip'] = self.changelog.tip()
275 self.tagscache['tip'] = self.changelog.tip()
274
276
275 return self.tagscache
277 return self.tagscache
276
278
277 def _hgtagsnodes(self):
279 def _hgtagsnodes(self):
278 heads = self.heads()
280 heads = self.heads()
279 heads.reverse()
281 heads.reverse()
280 last = {}
282 last = {}
281 ret = []
283 ret = []
282 for node in heads:
284 for node in heads:
283 c = self.changectx(node)
285 c = self.changectx(node)
284 rev = c.rev()
286 rev = c.rev()
285 try:
287 try:
286 fnode = c.filenode('.hgtags')
288 fnode = c.filenode('.hgtags')
287 except repo.LookupError:
289 except repo.LookupError:
288 continue
290 continue
289 ret.append((rev, node, fnode))
291 ret.append((rev, node, fnode))
290 if fnode in last:
292 if fnode in last:
291 ret[last[fnode]] = None
293 ret[last[fnode]] = None
292 last[fnode] = len(ret) - 1
294 last[fnode] = len(ret) - 1
293 return [item for item in ret if item]
295 return [item for item in ret if item]
294
296
295 def tagslist(self):
297 def tagslist(self):
296 '''return a list of tags ordered by revision'''
298 '''return a list of tags ordered by revision'''
297 l = []
299 l = []
298 for t, n in self.tags().items():
300 for t, n in self.tags().items():
299 try:
301 try:
300 r = self.changelog.rev(n)
302 r = self.changelog.rev(n)
301 except:
303 except:
302 r = -2 # sort to the beginning of the list if unknown
304 r = -2 # sort to the beginning of the list if unknown
303 l.append((r, t, n))
305 l.append((r, t, n))
304 l.sort()
306 l.sort()
305 return [(t, n) for r, t, n in l]
307 return [(t, n) for r, t, n in l]
306
308
307 def nodetags(self, node):
309 def nodetags(self, node):
308 '''return the tags associated with a node'''
310 '''return the tags associated with a node'''
309 if not self.nodetagscache:
311 if not self.nodetagscache:
310 self.nodetagscache = {}
312 self.nodetagscache = {}
311 for t, n in self.tags().items():
313 for t, n in self.tags().items():
312 self.nodetagscache.setdefault(n, []).append(t)
314 self.nodetagscache.setdefault(n, []).append(t)
313 return self.nodetagscache.get(node, [])
315 return self.nodetagscache.get(node, [])
314
316
315 def branchtags(self):
317 def branchtags(self):
316 if self.branchcache != None:
318 if self.branchcache != None:
317 return self.branchcache
319 return self.branchcache
318
320
319 self.branchcache = {} # avoid recursion in changectx
321 self.branchcache = {} # avoid recursion in changectx
320
322
321 partial, last, lrev = self._readbranchcache()
323 partial, last, lrev = self._readbranchcache()
322
324
323 tiprev = self.changelog.count() - 1
325 tiprev = self.changelog.count() - 1
324 if lrev != tiprev:
326 if lrev != tiprev:
325 self._updatebranchcache(partial, lrev+1, tiprev+1)
327 self._updatebranchcache(partial, lrev+1, tiprev+1)
326 self._writebranchcache(partial, self.changelog.tip(), tiprev)
328 self._writebranchcache(partial, self.changelog.tip(), tiprev)
327
329
328 # the branch cache is stored on disk as UTF-8, but in the local
330 # the branch cache is stored on disk as UTF-8, but in the local
329 # charset internally
331 # charset internally
330 for k, v in partial.items():
332 for k, v in partial.items():
331 self.branchcache[util.tolocal(k)] = v
333 self.branchcache[util.tolocal(k)] = v
332 return self.branchcache
334 return self.branchcache
333
335
334 def _readbranchcache(self):
336 def _readbranchcache(self):
335 partial = {}
337 partial = {}
336 try:
338 try:
337 f = self.opener("branches.cache")
339 f = self.opener("branches.cache")
338 lines = f.read().split('\n')
340 lines = f.read().split('\n')
339 f.close()
341 f.close()
340 last, lrev = lines.pop(0).rstrip().split(" ", 1)
342 last, lrev = lines.pop(0).rstrip().split(" ", 1)
341 last, lrev = bin(last), int(lrev)
343 last, lrev = bin(last), int(lrev)
342 if not (lrev < self.changelog.count() and
344 if not (lrev < self.changelog.count() and
343 self.changelog.node(lrev) == last): # sanity check
345 self.changelog.node(lrev) == last): # sanity check
344 # invalidate the cache
346 # invalidate the cache
345 raise ValueError('Invalid branch cache: unknown tip')
347 raise ValueError('Invalid branch cache: unknown tip')
346 for l in lines:
348 for l in lines:
347 if not l: continue
349 if not l: continue
348 node, label = l.rstrip().split(" ", 1)
350 node, label = l.rstrip().split(" ", 1)
349 partial[label] = bin(node)
351 partial[label] = bin(node)
350 except (KeyboardInterrupt, util.SignalInterrupt):
352 except (KeyboardInterrupt, util.SignalInterrupt):
351 raise
353 raise
352 except Exception, inst:
354 except Exception, inst:
353 if self.ui.debugflag:
355 if self.ui.debugflag:
354 self.ui.warn(str(inst), '\n')
356 self.ui.warn(str(inst), '\n')
355 partial, last, lrev = {}, nullid, nullrev
357 partial, last, lrev = {}, nullid, nullrev
356 return partial, last, lrev
358 return partial, last, lrev
357
359
358 def _writebranchcache(self, branches, tip, tiprev):
360 def _writebranchcache(self, branches, tip, tiprev):
359 try:
361 try:
360 f = self.opener("branches.cache", "w")
362 f = self.opener("branches.cache", "w")
361 f.write("%s %s\n" % (hex(tip), tiprev))
363 f.write("%s %s\n" % (hex(tip), tiprev))
362 for label, node in branches.iteritems():
364 for label, node in branches.iteritems():
363 f.write("%s %s\n" % (hex(node), label))
365 f.write("%s %s\n" % (hex(node), label))
364 except IOError:
366 except IOError:
365 pass
367 pass
366
368
367 def _updatebranchcache(self, partial, start, end):
369 def _updatebranchcache(self, partial, start, end):
368 for r in xrange(start, end):
370 for r in xrange(start, end):
369 c = self.changectx(r)
371 c = self.changectx(r)
370 b = c.branch()
372 b = c.branch()
371 if b:
373 if b:
372 partial[b] = c.node()
374 partial[b] = c.node()
373
375
374 def lookup(self, key):
376 def lookup(self, key):
375 if key == '.':
377 if key == '.':
376 key = self.dirstate.parents()[0]
378 key = self.dirstate.parents()[0]
377 if key == nullid:
379 if key == nullid:
378 raise repo.RepoError(_("no revision checked out"))
380 raise repo.RepoError(_("no revision checked out"))
379 elif key == 'null':
381 elif key == 'null':
380 return nullid
382 return nullid
381 n = self.changelog._match(key)
383 n = self.changelog._match(key)
382 if n:
384 if n:
383 return n
385 return n
384 if key in self.tags():
386 if key in self.tags():
385 return self.tags()[key]
387 return self.tags()[key]
386 if key in self.branchtags():
388 if key in self.branchtags():
387 return self.branchtags()[key]
389 return self.branchtags()[key]
388 n = self.changelog._partialmatch(key)
390 n = self.changelog._partialmatch(key)
389 if n:
391 if n:
390 return n
392 return n
391 raise repo.RepoError(_("unknown revision '%s'") % key)
393 raise repo.RepoError(_("unknown revision '%s'") % key)
392
394
393 def dev(self):
395 def dev(self):
394 return os.lstat(self.path).st_dev
396 return os.lstat(self.path).st_dev
395
397
396 def local(self):
398 def local(self):
397 return True
399 return True
398
400
399 def join(self, f):
401 def join(self, f):
400 return os.path.join(self.path, f)
402 return os.path.join(self.path, f)
401
403
402 def sjoin(self, f):
404 def sjoin(self, f):
403 return os.path.join(self.spath, f)
405 return os.path.join(self.spath, f)
404
406
405 def wjoin(self, f):
407 def wjoin(self, f):
406 return os.path.join(self.root, f)
408 return os.path.join(self.root, f)
407
409
408 def file(self, f):
410 def file(self, f):
409 if f[0] == '/':
411 if f[0] == '/':
410 f = f[1:]
412 f = f[1:]
411 return filelog.filelog(self.sopener, f, self.revlogversion)
413 return filelog.filelog(self.sopener, f, self.revlogversion)
412
414
413 def changectx(self, changeid=None):
415 def changectx(self, changeid=None):
414 return context.changectx(self, changeid)
416 return context.changectx(self, changeid)
415
417
416 def workingctx(self):
418 def workingctx(self):
417 return context.workingctx(self)
419 return context.workingctx(self)
418
420
419 def parents(self, changeid=None):
421 def parents(self, changeid=None):
420 '''
422 '''
421 get list of changectxs for parents of changeid or working directory
423 get list of changectxs for parents of changeid or working directory
422 '''
424 '''
423 if changeid is None:
425 if changeid is None:
424 pl = self.dirstate.parents()
426 pl = self.dirstate.parents()
425 else:
427 else:
426 n = self.changelog.lookup(changeid)
428 n = self.changelog.lookup(changeid)
427 pl = self.changelog.parents(n)
429 pl = self.changelog.parents(n)
428 if pl[1] == nullid:
430 if pl[1] == nullid:
429 return [self.changectx(pl[0])]
431 return [self.changectx(pl[0])]
430 return [self.changectx(pl[0]), self.changectx(pl[1])]
432 return [self.changectx(pl[0]), self.changectx(pl[1])]
431
433
432 def filectx(self, path, changeid=None, fileid=None):
434 def filectx(self, path, changeid=None, fileid=None):
433 """changeid can be a changeset revision, node, or tag.
435 """changeid can be a changeset revision, node, or tag.
434 fileid can be a file revision or node."""
436 fileid can be a file revision or node."""
435 return context.filectx(self, path, changeid, fileid)
437 return context.filectx(self, path, changeid, fileid)
436
438
437 def getcwd(self):
439 def getcwd(self):
438 return self.dirstate.getcwd()
440 return self.dirstate.getcwd()
439
441
440 def wfile(self, f, mode='r'):
442 def wfile(self, f, mode='r'):
441 return self.wopener(f, mode)
443 return self.wopener(f, mode)
442
444
443 def wread(self, filename):
445 def wread(self, filename):
444 if self.encodepats == None:
446 if self.encodepats == None:
445 l = []
447 l = []
446 for pat, cmd in self.ui.configitems("encode"):
448 for pat, cmd in self.ui.configitems("encode"):
447 mf = util.matcher(self.root, "", [pat], [], [])[1]
449 mf = util.matcher(self.root, "", [pat], [], [])[1]
448 l.append((mf, cmd))
450 l.append((mf, cmd))
449 self.encodepats = l
451 self.encodepats = l
450
452
451 data = self.wopener(filename, 'r').read()
453 data = self.wopener(filename, 'r').read()
452
454
453 for mf, cmd in self.encodepats:
455 for mf, cmd in self.encodepats:
454 if mf(filename):
456 if mf(filename):
455 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
457 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
456 data = util.filter(data, cmd)
458 data = util.filter(data, cmd)
457 break
459 break
458
460
459 return data
461 return data
460
462
461 def wwrite(self, filename, data, fd=None):
463 def wwrite(self, filename, data, fd=None):
462 if self.decodepats == None:
464 if self.decodepats == None:
463 l = []
465 l = []
464 for pat, cmd in self.ui.configitems("decode"):
466 for pat, cmd in self.ui.configitems("decode"):
465 mf = util.matcher(self.root, "", [pat], [], [])[1]
467 mf = util.matcher(self.root, "", [pat], [], [])[1]
466 l.append((mf, cmd))
468 l.append((mf, cmd))
467 self.decodepats = l
469 self.decodepats = l
468
470
469 for mf, cmd in self.decodepats:
471 for mf, cmd in self.decodepats:
470 if mf(filename):
472 if mf(filename):
471 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
473 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
472 data = util.filter(data, cmd)
474 data = util.filter(data, cmd)
473 break
475 break
474
476
475 if fd:
477 if fd:
476 return fd.write(data)
478 return fd.write(data)
477 return self.wopener(filename, 'w').write(data)
479 return self.wopener(filename, 'w').write(data)
478
480
479 def transaction(self):
481 def transaction(self):
480 tr = self.transhandle
482 tr = self.transhandle
481 if tr != None and tr.running():
483 if tr != None and tr.running():
482 return tr.nest()
484 return tr.nest()
483
485
484 # save dirstate for rollback
486 # save dirstate for rollback
485 try:
487 try:
486 ds = self.opener("dirstate").read()
488 ds = self.opener("dirstate").read()
487 except IOError:
489 except IOError:
488 ds = ""
490 ds = ""
489 self.opener("journal.dirstate", "w").write(ds)
491 self.opener("journal.dirstate", "w").write(ds)
490
492
491 renames = [(self.sjoin("journal"), self.sjoin("undo")),
493 renames = [(self.sjoin("journal"), self.sjoin("undo")),
492 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
494 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
493 tr = transaction.transaction(self.ui.warn, self.sopener,
495 tr = transaction.transaction(self.ui.warn, self.sopener,
494 self.sjoin("journal"),
496 self.sjoin("journal"),
495 aftertrans(renames))
497 aftertrans(renames))
496 self.transhandle = tr
498 self.transhandle = tr
497 return tr
499 return tr
498
500
499 def recover(self):
501 def recover(self):
500 l = self.lock()
502 l = self.lock()
501 if os.path.exists(self.sjoin("journal")):
503 if os.path.exists(self.sjoin("journal")):
502 self.ui.status(_("rolling back interrupted transaction\n"))
504 self.ui.status(_("rolling back interrupted transaction\n"))
503 transaction.rollback(self.sopener, self.sjoin("journal"))
505 transaction.rollback(self.sopener, self.sjoin("journal"))
504 self.reload()
506 self.reload()
505 return True
507 return True
506 else:
508 else:
507 self.ui.warn(_("no interrupted transaction available\n"))
509 self.ui.warn(_("no interrupted transaction available\n"))
508 return False
510 return False
509
511
510 def rollback(self, wlock=None):
512 def rollback(self, wlock=None):
511 if not wlock:
513 if not wlock:
512 wlock = self.wlock()
514 wlock = self.wlock()
513 l = self.lock()
515 l = self.lock()
514 if os.path.exists(self.sjoin("undo")):
516 if os.path.exists(self.sjoin("undo")):
515 self.ui.status(_("rolling back last transaction\n"))
517 self.ui.status(_("rolling back last transaction\n"))
516 transaction.rollback(self.sopener, self.sjoin("undo"))
518 transaction.rollback(self.sopener, self.sjoin("undo"))
517 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
519 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
518 self.reload()
520 self.reload()
519 self.wreload()
521 self.wreload()
520 else:
522 else:
521 self.ui.warn(_("no rollback information available\n"))
523 self.ui.warn(_("no rollback information available\n"))
522
524
523 def wreload(self):
525 def wreload(self):
524 self.dirstate.read()
526 self.dirstate.read()
525
527
526 def reload(self):
528 def reload(self):
527 self.changelog.load()
529 self.changelog.load()
528 self.manifest.load()
530 self.manifest.load()
529 self.tagscache = None
531 self.tagscache = None
530 self.nodetagscache = None
532 self.nodetagscache = None
531
533
532 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
534 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
533 desc=None):
535 desc=None):
534 try:
536 try:
535 l = lock.lock(lockname, 0, releasefn, desc=desc)
537 l = lock.lock(lockname, 0, releasefn, desc=desc)
536 except lock.LockHeld, inst:
538 except lock.LockHeld, inst:
537 if not wait:
539 if not wait:
538 raise
540 raise
539 self.ui.warn(_("waiting for lock on %s held by %r\n") %
541 self.ui.warn(_("waiting for lock on %s held by %r\n") %
540 (desc, inst.locker))
542 (desc, inst.locker))
541 # default to 600 seconds timeout
543 # default to 600 seconds timeout
542 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
544 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
543 releasefn, desc=desc)
545 releasefn, desc=desc)
544 if acquirefn:
546 if acquirefn:
545 acquirefn()
547 acquirefn()
546 return l
548 return l
547
549
548 def lock(self, wait=1):
550 def lock(self, wait=1):
549 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
551 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
550 desc=_('repository %s') % self.origroot)
552 desc=_('repository %s') % self.origroot)
551
553
552 def wlock(self, wait=1):
554 def wlock(self, wait=1):
553 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
555 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
554 self.wreload,
556 self.wreload,
555 desc=_('working directory of %s') % self.origroot)
557 desc=_('working directory of %s') % self.origroot)
556
558
557 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
559 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
558 """
560 """
559 commit an individual file as part of a larger transaction
561 commit an individual file as part of a larger transaction
560 """
562 """
561
563
562 t = self.wread(fn)
564 t = self.wread(fn)
563 fl = self.file(fn)
565 fl = self.file(fn)
564 fp1 = manifest1.get(fn, nullid)
566 fp1 = manifest1.get(fn, nullid)
565 fp2 = manifest2.get(fn, nullid)
567 fp2 = manifest2.get(fn, nullid)
566
568
567 meta = {}
569 meta = {}
568 cp = self.dirstate.copied(fn)
570 cp = self.dirstate.copied(fn)
569 if cp:
571 if cp:
570 meta["copy"] = cp
572 meta["copy"] = cp
571 if not manifest2: # not a branch merge
573 if not manifest2: # not a branch merge
572 meta["copyrev"] = hex(manifest1.get(cp, nullid))
574 meta["copyrev"] = hex(manifest1.get(cp, nullid))
573 fp2 = nullid
575 fp2 = nullid
574 elif fp2 != nullid: # copied on remote side
576 elif fp2 != nullid: # copied on remote side
575 meta["copyrev"] = hex(manifest1.get(cp, nullid))
577 meta["copyrev"] = hex(manifest1.get(cp, nullid))
576 elif fp1 != nullid: # copied on local side, reversed
578 elif fp1 != nullid: # copied on local side, reversed
577 meta["copyrev"] = hex(manifest2.get(cp))
579 meta["copyrev"] = hex(manifest2.get(cp))
578 fp2 = nullid
580 fp2 = nullid
579 else: # directory rename
581 else: # directory rename
580 meta["copyrev"] = hex(manifest1.get(cp, nullid))
582 meta["copyrev"] = hex(manifest1.get(cp, nullid))
581 self.ui.debug(_(" %s: copy %s:%s\n") %
583 self.ui.debug(_(" %s: copy %s:%s\n") %
582 (fn, cp, meta["copyrev"]))
584 (fn, cp, meta["copyrev"]))
583 fp1 = nullid
585 fp1 = nullid
584 elif fp2 != nullid:
586 elif fp2 != nullid:
585 # is one parent an ancestor of the other?
587 # is one parent an ancestor of the other?
586 fpa = fl.ancestor(fp1, fp2)
588 fpa = fl.ancestor(fp1, fp2)
587 if fpa == fp1:
589 if fpa == fp1:
588 fp1, fp2 = fp2, nullid
590 fp1, fp2 = fp2, nullid
589 elif fpa == fp2:
591 elif fpa == fp2:
590 fp2 = nullid
592 fp2 = nullid
591
593
592 # is the file unmodified from the parent? report existing entry
594 # is the file unmodified from the parent? report existing entry
593 if fp2 == nullid and not fl.cmp(fp1, t):
595 if fp2 == nullid and not fl.cmp(fp1, t):
594 return fp1
596 return fp1
595
597
596 changelist.append(fn)
598 changelist.append(fn)
597 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
599 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
598
600
599 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
601 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
600 if p1 is None:
602 if p1 is None:
601 p1, p2 = self.dirstate.parents()
603 p1, p2 = self.dirstate.parents()
602 return self.commit(files=files, text=text, user=user, date=date,
604 return self.commit(files=files, text=text, user=user, date=date,
603 p1=p1, p2=p2, wlock=wlock)
605 p1=p1, p2=p2, wlock=wlock)
604
606
605 def commit(self, files=None, text="", user=None, date=None,
607 def commit(self, files=None, text="", user=None, date=None,
606 match=util.always, force=False, lock=None, wlock=None,
608 match=util.always, force=False, lock=None, wlock=None,
607 force_editor=False, p1=None, p2=None, extra={}):
609 force_editor=False, p1=None, p2=None, extra={}):
608
610
609 commit = []
611 commit = []
610 remove = []
612 remove = []
611 changed = []
613 changed = []
612 use_dirstate = (p1 is None) # not rawcommit
614 use_dirstate = (p1 is None) # not rawcommit
613 extra = extra.copy()
615 extra = extra.copy()
614
616
615 if use_dirstate:
617 if use_dirstate:
616 if files:
618 if files:
617 for f in files:
619 for f in files:
618 s = self.dirstate.state(f)
620 s = self.dirstate.state(f)
619 if s in 'nmai':
621 if s in 'nmai':
620 commit.append(f)
622 commit.append(f)
621 elif s == 'r':
623 elif s == 'r':
622 remove.append(f)
624 remove.append(f)
623 else:
625 else:
624 self.ui.warn(_("%s not tracked!\n") % f)
626 self.ui.warn(_("%s not tracked!\n") % f)
625 else:
627 else:
626 changes = self.status(match=match)[:5]
628 changes = self.status(match=match)[:5]
627 modified, added, removed, deleted, unknown = changes
629 modified, added, removed, deleted, unknown = changes
628 commit = modified + added
630 commit = modified + added
629 remove = removed
631 remove = removed
630 else:
632 else:
631 commit = files
633 commit = files
632
634
633 if use_dirstate:
635 if use_dirstate:
634 p1, p2 = self.dirstate.parents()
636 p1, p2 = self.dirstate.parents()
635 update_dirstate = True
637 update_dirstate = True
636 else:
638 else:
637 p1, p2 = p1, p2 or nullid
639 p1, p2 = p1, p2 or nullid
638 update_dirstate = (self.dirstate.parents()[0] == p1)
640 update_dirstate = (self.dirstate.parents()[0] == p1)
639
641
640 c1 = self.changelog.read(p1)
642 c1 = self.changelog.read(p1)
641 c2 = self.changelog.read(p2)
643 c2 = self.changelog.read(p2)
642 m1 = self.manifest.read(c1[0]).copy()
644 m1 = self.manifest.read(c1[0]).copy()
643 m2 = self.manifest.read(c2[0])
645 m2 = self.manifest.read(c2[0])
644
646
645 if use_dirstate:
647 if use_dirstate:
646 branchname = util.fromlocal(self.workingctx().branch())
648 branchname = util.fromlocal(self.workingctx().branch())
647 else:
649 else:
648 branchname = ""
650 branchname = ""
649
651
650 if use_dirstate:
652 if use_dirstate:
651 oldname = c1[5].get("branch", "") # stored in UTF-8
653 oldname = c1[5].get("branch", "") # stored in UTF-8
652 if not commit and not remove and not force and p2 == nullid and \
654 if not commit and not remove and not force and p2 == nullid and \
653 branchname == oldname:
655 branchname == oldname:
654 self.ui.status(_("nothing changed\n"))
656 self.ui.status(_("nothing changed\n"))
655 return None
657 return None
656
658
657 xp1 = hex(p1)
659 xp1 = hex(p1)
658 if p2 == nullid: xp2 = ''
660 if p2 == nullid: xp2 = ''
659 else: xp2 = hex(p2)
661 else: xp2 = hex(p2)
660
662
661 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
663 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
662
664
663 if not wlock:
665 if not wlock:
664 wlock = self.wlock()
666 wlock = self.wlock()
665 if not lock:
667 if not lock:
666 lock = self.lock()
668 lock = self.lock()
667 tr = self.transaction()
669 tr = self.transaction()
668
670
669 # check in files
671 # check in files
670 new = {}
672 new = {}
671 linkrev = self.changelog.count()
673 linkrev = self.changelog.count()
672 commit.sort()
674 commit.sort()
673 for f in commit:
675 for f in commit:
674 self.ui.note(f + "\n")
676 self.ui.note(f + "\n")
675 try:
677 try:
676 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
678 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
677 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
679 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
678 except IOError:
680 except IOError:
679 if use_dirstate:
681 if use_dirstate:
680 self.ui.warn(_("trouble committing %s!\n") % f)
682 self.ui.warn(_("trouble committing %s!\n") % f)
681 raise
683 raise
682 else:
684 else:
683 remove.append(f)
685 remove.append(f)
684
686
685 # update manifest
687 # update manifest
686 m1.update(new)
688 m1.update(new)
687 remove.sort()
689 remove.sort()
688
690
689 for f in remove:
691 for f in remove:
690 if f in m1:
692 if f in m1:
691 del m1[f]
693 del m1[f]
692 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
694 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
693
695
694 # add changeset
696 # add changeset
695 new = new.keys()
697 new = new.keys()
696 new.sort()
698 new.sort()
697
699
698 user = user or self.ui.username()
700 user = user or self.ui.username()
699 if not text or force_editor:
701 if not text or force_editor:
700 edittext = []
702 edittext = []
701 if text:
703 if text:
702 edittext.append(text)
704 edittext.append(text)
703 edittext.append("")
705 edittext.append("")
704 edittext.append("HG: user: %s" % user)
706 edittext.append("HG: user: %s" % user)
705 if p2 != nullid:
707 if p2 != nullid:
706 edittext.append("HG: branch merge")
708 edittext.append("HG: branch merge")
707 edittext.extend(["HG: changed %s" % f for f in changed])
709 edittext.extend(["HG: changed %s" % f for f in changed])
708 edittext.extend(["HG: removed %s" % f for f in remove])
710 edittext.extend(["HG: removed %s" % f for f in remove])
709 if not changed and not remove:
711 if not changed and not remove:
710 edittext.append("HG: no files changed")
712 edittext.append("HG: no files changed")
711 edittext.append("")
713 edittext.append("")
712 # run editor in the repository root
714 # run editor in the repository root
713 olddir = os.getcwd()
715 olddir = os.getcwd()
714 os.chdir(self.root)
716 os.chdir(self.root)
715 text = self.ui.edit("\n".join(edittext), user)
717 text = self.ui.edit("\n".join(edittext), user)
716 os.chdir(olddir)
718 os.chdir(olddir)
717
719
718 lines = [line.rstrip() for line in text.rstrip().splitlines()]
720 lines = [line.rstrip() for line in text.rstrip().splitlines()]
719 while lines and not lines[0]:
721 while lines and not lines[0]:
720 del lines[0]
722 del lines[0]
721 if not lines:
723 if not lines:
722 return None
724 return None
723 text = '\n'.join(lines)
725 text = '\n'.join(lines)
724 if branchname:
726 if branchname:
725 extra["branch"] = branchname
727 extra["branch"] = branchname
726 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
728 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
727 user, date, extra)
729 user, date, extra)
728 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
730 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
729 parent2=xp2)
731 parent2=xp2)
730 tr.close()
732 tr.close()
731
733
732 if use_dirstate or update_dirstate:
734 if use_dirstate or update_dirstate:
733 self.dirstate.setparents(n)
735 self.dirstate.setparents(n)
734 if use_dirstate:
736 if use_dirstate:
735 self.dirstate.update(new, "n")
737 self.dirstate.update(new, "n")
736 self.dirstate.forget(remove)
738 self.dirstate.forget(remove)
737
739
738 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
740 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
739 return n
741 return n
740
742
741 def walk(self, node=None, files=[], match=util.always, badmatch=None):
743 def walk(self, node=None, files=[], match=util.always, badmatch=None):
742 '''
744 '''
743 walk recursively through the directory tree or a given
745 walk recursively through the directory tree or a given
744 changeset, finding all files matched by the match
746 changeset, finding all files matched by the match
745 function
747 function
746
748
747 results are yielded in a tuple (src, filename), where src
749 results are yielded in a tuple (src, filename), where src
748 is one of:
750 is one of:
749 'f' the file was found in the directory tree
751 'f' the file was found in the directory tree
750 'm' the file was only in the dirstate and not in the tree
752 'm' the file was only in the dirstate and not in the tree
751 'b' file was not found and matched badmatch
753 'b' file was not found and matched badmatch
752 '''
754 '''
753
755
754 if node:
756 if node:
755 fdict = dict.fromkeys(files)
757 fdict = dict.fromkeys(files)
756 for fn in self.manifest.read(self.changelog.read(node)[0]):
758 for fn in self.manifest.read(self.changelog.read(node)[0]):
757 for ffn in fdict:
759 for ffn in fdict:
758 # match if the file is the exact name or a directory
760 # match if the file is the exact name or a directory
759 if ffn == fn or fn.startswith("%s/" % ffn):
761 if ffn == fn or fn.startswith("%s/" % ffn):
760 del fdict[ffn]
762 del fdict[ffn]
761 break
763 break
762 if match(fn):
764 if match(fn):
763 yield 'm', fn
765 yield 'm', fn
764 for fn in fdict:
766 for fn in fdict:
765 if badmatch and badmatch(fn):
767 if badmatch and badmatch(fn):
766 if match(fn):
768 if match(fn):
767 yield 'b', fn
769 yield 'b', fn
768 else:
770 else:
769 self.ui.warn(_('%s: No such file in rev %s\n') % (
771 self.ui.warn(_('%s: No such file in rev %s\n') % (
770 util.pathto(self.getcwd(), fn), short(node)))
772 util.pathto(self.getcwd(), fn), short(node)))
771 else:
773 else:
772 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
774 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
773 yield src, fn
775 yield src, fn
774
776
775 def status(self, node1=None, node2=None, files=[], match=util.always,
777 def status(self, node1=None, node2=None, files=[], match=util.always,
776 wlock=None, list_ignored=False, list_clean=False):
778 wlock=None, list_ignored=False, list_clean=False):
777 """return status of files between two nodes or node and working directory
779 """return status of files between two nodes or node and working directory
778
780
779 If node1 is None, use the first dirstate parent instead.
781 If node1 is None, use the first dirstate parent instead.
780 If node2 is None, compare node1 with working directory.
782 If node2 is None, compare node1 with working directory.
781 """
783 """
782
784
783 def fcmp(fn, mf):
785 def fcmp(fn, mf):
784 t1 = self.wread(fn)
786 t1 = self.wread(fn)
785 return self.file(fn).cmp(mf.get(fn, nullid), t1)
787 return self.file(fn).cmp(mf.get(fn, nullid), t1)
786
788
787 def mfmatches(node):
789 def mfmatches(node):
788 change = self.changelog.read(node)
790 change = self.changelog.read(node)
789 mf = self.manifest.read(change[0]).copy()
791 mf = self.manifest.read(change[0]).copy()
790 for fn in mf.keys():
792 for fn in mf.keys():
791 if not match(fn):
793 if not match(fn):
792 del mf[fn]
794 del mf[fn]
793 return mf
795 return mf
794
796
795 modified, added, removed, deleted, unknown = [], [], [], [], []
797 modified, added, removed, deleted, unknown = [], [], [], [], []
796 ignored, clean = [], []
798 ignored, clean = [], []
797
799
798 compareworking = False
800 compareworking = False
799 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
801 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
800 compareworking = True
802 compareworking = True
801
803
802 if not compareworking:
804 if not compareworking:
803 # read the manifest from node1 before the manifest from node2,
805 # read the manifest from node1 before the manifest from node2,
804 # so that we'll hit the manifest cache if we're going through
806 # so that we'll hit the manifest cache if we're going through
805 # all the revisions in parent->child order.
807 # all the revisions in parent->child order.
806 mf1 = mfmatches(node1)
808 mf1 = mfmatches(node1)
807
809
808 # are we comparing the working directory?
810 # are we comparing the working directory?
809 if not node2:
811 if not node2:
810 if not wlock:
812 if not wlock:
811 try:
813 try:
812 wlock = self.wlock(wait=0)
814 wlock = self.wlock(wait=0)
813 except lock.LockException:
815 except lock.LockException:
814 wlock = None
816 wlock = None
815 (lookup, modified, added, removed, deleted, unknown,
817 (lookup, modified, added, removed, deleted, unknown,
816 ignored, clean) = self.dirstate.status(files, match,
818 ignored, clean) = self.dirstate.status(files, match,
817 list_ignored, list_clean)
819 list_ignored, list_clean)
818
820
819 # are we comparing working dir against its parent?
821 # are we comparing working dir against its parent?
820 if compareworking:
822 if compareworking:
821 if lookup:
823 if lookup:
822 # do a full compare of any files that might have changed
824 # do a full compare of any files that might have changed
823 mf2 = mfmatches(self.dirstate.parents()[0])
825 mf2 = mfmatches(self.dirstate.parents()[0])
824 for f in lookup:
826 for f in lookup:
825 if fcmp(f, mf2):
827 if fcmp(f, mf2):
826 modified.append(f)
828 modified.append(f)
827 else:
829 else:
828 clean.append(f)
830 clean.append(f)
829 if wlock is not None:
831 if wlock is not None:
830 self.dirstate.update([f], "n")
832 self.dirstate.update([f], "n")
831 else:
833 else:
832 # we are comparing working dir against non-parent
834 # we are comparing working dir against non-parent
833 # generate a pseudo-manifest for the working dir
835 # generate a pseudo-manifest for the working dir
834 # XXX: create it in dirstate.py ?
836 # XXX: create it in dirstate.py ?
835 mf2 = mfmatches(self.dirstate.parents()[0])
837 mf2 = mfmatches(self.dirstate.parents()[0])
836 for f in lookup + modified + added:
838 for f in lookup + modified + added:
837 mf2[f] = ""
839 mf2[f] = ""
838 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
840 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
839 for f in removed:
841 for f in removed:
840 if f in mf2:
842 if f in mf2:
841 del mf2[f]
843 del mf2[f]
842 else:
844 else:
843 # we are comparing two revisions
845 # we are comparing two revisions
844 mf2 = mfmatches(node2)
846 mf2 = mfmatches(node2)
845
847
846 if not compareworking:
848 if not compareworking:
847 # flush lists from dirstate before comparing manifests
849 # flush lists from dirstate before comparing manifests
848 modified, added, clean = [], [], []
850 modified, added, clean = [], [], []
849
851
850 # make sure to sort the files so we talk to the disk in a
852 # make sure to sort the files so we talk to the disk in a
851 # reasonable order
853 # reasonable order
852 mf2keys = mf2.keys()
854 mf2keys = mf2.keys()
853 mf2keys.sort()
855 mf2keys.sort()
854 for fn in mf2keys:
856 for fn in mf2keys:
855 if mf1.has_key(fn):
857 if mf1.has_key(fn):
856 if mf1.flags(fn) != mf2.flags(fn) or \
858 if mf1.flags(fn) != mf2.flags(fn) or \
857 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
859 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
858 modified.append(fn)
860 modified.append(fn)
859 elif list_clean:
861 elif list_clean:
860 clean.append(fn)
862 clean.append(fn)
861 del mf1[fn]
863 del mf1[fn]
862 else:
864 else:
863 added.append(fn)
865 added.append(fn)
864
866
865 removed = mf1.keys()
867 removed = mf1.keys()
866
868
867 # sort and return results:
869 # sort and return results:
868 for l in modified, added, removed, deleted, unknown, ignored, clean:
870 for l in modified, added, removed, deleted, unknown, ignored, clean:
869 l.sort()
871 l.sort()
870 return (modified, added, removed, deleted, unknown, ignored, clean)
872 return (modified, added, removed, deleted, unknown, ignored, clean)
871
873
872 def add(self, list, wlock=None):
874 def add(self, list, wlock=None):
873 if not wlock:
875 if not wlock:
874 wlock = self.wlock()
876 wlock = self.wlock()
875 for f in list:
877 for f in list:
876 p = self.wjoin(f)
878 p = self.wjoin(f)
877 if not os.path.exists(p):
879 if not os.path.exists(p):
878 self.ui.warn(_("%s does not exist!\n") % f)
880 self.ui.warn(_("%s does not exist!\n") % f)
879 elif not os.path.isfile(p):
881 elif not os.path.isfile(p):
880 self.ui.warn(_("%s not added: only files supported currently\n")
882 self.ui.warn(_("%s not added: only files supported currently\n")
881 % f)
883 % f)
882 elif self.dirstate.state(f) in 'an':
884 elif self.dirstate.state(f) in 'an':
883 self.ui.warn(_("%s already tracked!\n") % f)
885 self.ui.warn(_("%s already tracked!\n") % f)
884 else:
886 else:
885 self.dirstate.update([f], "a")
887 self.dirstate.update([f], "a")
886
888
887 def forget(self, list, wlock=None):
889 def forget(self, list, wlock=None):
888 if not wlock:
890 if not wlock:
889 wlock = self.wlock()
891 wlock = self.wlock()
890 for f in list:
892 for f in list:
891 if self.dirstate.state(f) not in 'ai':
893 if self.dirstate.state(f) not in 'ai':
892 self.ui.warn(_("%s not added!\n") % f)
894 self.ui.warn(_("%s not added!\n") % f)
893 else:
895 else:
894 self.dirstate.forget([f])
896 self.dirstate.forget([f])
895
897
896 def remove(self, list, unlink=False, wlock=None):
898 def remove(self, list, unlink=False, wlock=None):
897 if unlink:
899 if unlink:
898 for f in list:
900 for f in list:
899 try:
901 try:
900 util.unlink(self.wjoin(f))
902 util.unlink(self.wjoin(f))
901 except OSError, inst:
903 except OSError, inst:
902 if inst.errno != errno.ENOENT:
904 if inst.errno != errno.ENOENT:
903 raise
905 raise
904 if not wlock:
906 if not wlock:
905 wlock = self.wlock()
907 wlock = self.wlock()
906 for f in list:
908 for f in list:
907 p = self.wjoin(f)
909 p = self.wjoin(f)
908 if os.path.exists(p):
910 if os.path.exists(p):
909 self.ui.warn(_("%s still exists!\n") % f)
911 self.ui.warn(_("%s still exists!\n") % f)
910 elif self.dirstate.state(f) == 'a':
912 elif self.dirstate.state(f) == 'a':
911 self.dirstate.forget([f])
913 self.dirstate.forget([f])
912 elif f not in self.dirstate:
914 elif f not in self.dirstate:
913 self.ui.warn(_("%s not tracked!\n") % f)
915 self.ui.warn(_("%s not tracked!\n") % f)
914 else:
916 else:
915 self.dirstate.update([f], "r")
917 self.dirstate.update([f], "r")
916
918
917 def undelete(self, list, wlock=None):
919 def undelete(self, list, wlock=None):
918 p = self.dirstate.parents()[0]
920 p = self.dirstate.parents()[0]
919 mn = self.changelog.read(p)[0]
921 mn = self.changelog.read(p)[0]
920 m = self.manifest.read(mn)
922 m = self.manifest.read(mn)
921 if not wlock:
923 if not wlock:
922 wlock = self.wlock()
924 wlock = self.wlock()
923 for f in list:
925 for f in list:
924 if self.dirstate.state(f) not in "r":
926 if self.dirstate.state(f) not in "r":
925 self.ui.warn("%s not removed!\n" % f)
927 self.ui.warn("%s not removed!\n" % f)
926 else:
928 else:
927 t = self.file(f).read(m[f])
929 t = self.file(f).read(m[f])
928 self.wwrite(f, t)
930 self.wwrite(f, t)
929 util.set_exec(self.wjoin(f), m.execf(f))
931 util.set_exec(self.wjoin(f), m.execf(f))
930 self.dirstate.update([f], "n")
932 self.dirstate.update([f], "n")
931
933
932 def copy(self, source, dest, wlock=None):
934 def copy(self, source, dest, wlock=None):
933 p = self.wjoin(dest)
935 p = self.wjoin(dest)
934 if not os.path.exists(p):
936 if not os.path.exists(p):
935 self.ui.warn(_("%s does not exist!\n") % dest)
937 self.ui.warn(_("%s does not exist!\n") % dest)
936 elif not os.path.isfile(p):
938 elif not os.path.isfile(p):
937 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
939 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
938 else:
940 else:
939 if not wlock:
941 if not wlock:
940 wlock = self.wlock()
942 wlock = self.wlock()
941 if self.dirstate.state(dest) == '?':
943 if self.dirstate.state(dest) == '?':
942 self.dirstate.update([dest], "a")
944 self.dirstate.update([dest], "a")
943 self.dirstate.copy(source, dest)
945 self.dirstate.copy(source, dest)
944
946
945 def heads(self, start=None):
947 def heads(self, start=None):
946 heads = self.changelog.heads(start)
948 heads = self.changelog.heads(start)
947 # sort the output in rev descending order
949 # sort the output in rev descending order
948 heads = [(-self.changelog.rev(h), h) for h in heads]
950 heads = [(-self.changelog.rev(h), h) for h in heads]
949 heads.sort()
951 heads.sort()
950 return [n for (r, n) in heads]
952 return [n for (r, n) in heads]
951
953
952 # branchlookup returns a dict giving a list of branches for
954 # branchlookup returns a dict giving a list of branches for
953 # each head. A branch is defined as the tag of a node or
955 # each head. A branch is defined as the tag of a node or
954 # the branch of the node's parents. If a node has multiple
956 # the branch of the node's parents. If a node has multiple
955 # branch tags, tags are eliminated if they are visible from other
957 # branch tags, tags are eliminated if they are visible from other
956 # branch tags.
958 # branch tags.
957 #
959 #
958 # So, for this graph: a->b->c->d->e
960 # So, for this graph: a->b->c->d->e
959 # \ /
961 # \ /
960 # aa -----/
962 # aa -----/
961 # a has tag 2.6.12
963 # a has tag 2.6.12
962 # d has tag 2.6.13
964 # d has tag 2.6.13
963 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
965 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
964 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
966 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
965 # from the list.
967 # from the list.
966 #
968 #
967 # It is possible that more than one head will have the same branch tag.
969 # It is possible that more than one head will have the same branch tag.
968 # callers need to check the result for multiple heads under the same
970 # callers need to check the result for multiple heads under the same
969 # branch tag if that is a problem for them (ie checkout of a specific
971 # branch tag if that is a problem for them (ie checkout of a specific
970 # branch).
972 # branch).
971 #
973 #
972 # passing in a specific branch will limit the depth of the search
974 # passing in a specific branch will limit the depth of the search
973 # through the parents. It won't limit the branches returned in the
975 # through the parents. It won't limit the branches returned in the
974 # result though.
976 # result though.
975 def branchlookup(self, heads=None, branch=None):
977 def branchlookup(self, heads=None, branch=None):
976 if not heads:
978 if not heads:
977 heads = self.heads()
979 heads = self.heads()
978 headt = [ h for h in heads ]
980 headt = [ h for h in heads ]
979 chlog = self.changelog
981 chlog = self.changelog
980 branches = {}
982 branches = {}
981 merges = []
983 merges = []
982 seenmerge = {}
984 seenmerge = {}
983
985
984 # traverse the tree once for each head, recording in the branches
986 # traverse the tree once for each head, recording in the branches
985 # dict which tags are visible from this head. The branches
987 # dict which tags are visible from this head. The branches
986 # dict also records which tags are visible from each tag
988 # dict also records which tags are visible from each tag
987 # while we traverse.
989 # while we traverse.
988 while headt or merges:
990 while headt or merges:
989 if merges:
991 if merges:
990 n, found = merges.pop()
992 n, found = merges.pop()
991 visit = [n]
993 visit = [n]
992 else:
994 else:
993 h = headt.pop()
995 h = headt.pop()
994 visit = [h]
996 visit = [h]
995 found = [h]
997 found = [h]
996 seen = {}
998 seen = {}
997 while visit:
999 while visit:
998 n = visit.pop()
1000 n = visit.pop()
999 if n in seen:
1001 if n in seen:
1000 continue
1002 continue
1001 pp = chlog.parents(n)
1003 pp = chlog.parents(n)
1002 tags = self.nodetags(n)
1004 tags = self.nodetags(n)
1003 if tags:
1005 if tags:
1004 for x in tags:
1006 for x in tags:
1005 if x == 'tip':
1007 if x == 'tip':
1006 continue
1008 continue
1007 for f in found:
1009 for f in found:
1008 branches.setdefault(f, {})[n] = 1
1010 branches.setdefault(f, {})[n] = 1
1009 branches.setdefault(n, {})[n] = 1
1011 branches.setdefault(n, {})[n] = 1
1010 break
1012 break
1011 if n not in found:
1013 if n not in found:
1012 found.append(n)
1014 found.append(n)
1013 if branch in tags:
1015 if branch in tags:
1014 continue
1016 continue
1015 seen[n] = 1
1017 seen[n] = 1
1016 if pp[1] != nullid and n not in seenmerge:
1018 if pp[1] != nullid and n not in seenmerge:
1017 merges.append((pp[1], [x for x in found]))
1019 merges.append((pp[1], [x for x in found]))
1018 seenmerge[n] = 1
1020 seenmerge[n] = 1
1019 if pp[0] != nullid:
1021 if pp[0] != nullid:
1020 visit.append(pp[0])
1022 visit.append(pp[0])
1021 # traverse the branches dict, eliminating branch tags from each
1023 # traverse the branches dict, eliminating branch tags from each
1022 # head that are visible from another branch tag for that head.
1024 # head that are visible from another branch tag for that head.
1023 out = {}
1025 out = {}
1024 viscache = {}
1026 viscache = {}
1025 for h in heads:
1027 for h in heads:
1026 def visible(node):
1028 def visible(node):
1027 if node in viscache:
1029 if node in viscache:
1028 return viscache[node]
1030 return viscache[node]
1029 ret = {}
1031 ret = {}
1030 visit = [node]
1032 visit = [node]
1031 while visit:
1033 while visit:
1032 x = visit.pop()
1034 x = visit.pop()
1033 if x in viscache:
1035 if x in viscache:
1034 ret.update(viscache[x])
1036 ret.update(viscache[x])
1035 elif x not in ret:
1037 elif x not in ret:
1036 ret[x] = 1
1038 ret[x] = 1
1037 if x in branches:
1039 if x in branches:
1038 visit[len(visit):] = branches[x].keys()
1040 visit[len(visit):] = branches[x].keys()
1039 viscache[node] = ret
1041 viscache[node] = ret
1040 return ret
1042 return ret
1041 if h not in branches:
1043 if h not in branches:
1042 continue
1044 continue
1043 # O(n^2), but somewhat limited. This only searches the
1045 # O(n^2), but somewhat limited. This only searches the
1044 # tags visible from a specific head, not all the tags in the
1046 # tags visible from a specific head, not all the tags in the
1045 # whole repo.
1047 # whole repo.
1046 for b in branches[h]:
1048 for b in branches[h]:
1047 vis = False
1049 vis = False
1048 for bb in branches[h].keys():
1050 for bb in branches[h].keys():
1049 if b != bb:
1051 if b != bb:
1050 if b in visible(bb):
1052 if b in visible(bb):
1051 vis = True
1053 vis = True
1052 break
1054 break
1053 if not vis:
1055 if not vis:
1054 l = out.setdefault(h, [])
1056 l = out.setdefault(h, [])
1055 l[len(l):] = self.nodetags(b)
1057 l[len(l):] = self.nodetags(b)
1056 return out
1058 return out
1057
1059
1058 def branches(self, nodes):
1060 def branches(self, nodes):
1059 if not nodes:
1061 if not nodes:
1060 nodes = [self.changelog.tip()]
1062 nodes = [self.changelog.tip()]
1061 b = []
1063 b = []
1062 for n in nodes:
1064 for n in nodes:
1063 t = n
1065 t = n
1064 while 1:
1066 while 1:
1065 p = self.changelog.parents(n)
1067 p = self.changelog.parents(n)
1066 if p[1] != nullid or p[0] == nullid:
1068 if p[1] != nullid or p[0] == nullid:
1067 b.append((t, n, p[0], p[1]))
1069 b.append((t, n, p[0], p[1]))
1068 break
1070 break
1069 n = p[0]
1071 n = p[0]
1070 return b
1072 return b
1071
1073
1072 def between(self, pairs):
1074 def between(self, pairs):
1073 r = []
1075 r = []
1074
1076
1075 for top, bottom in pairs:
1077 for top, bottom in pairs:
1076 n, l, i = top, [], 0
1078 n, l, i = top, [], 0
1077 f = 1
1079 f = 1
1078
1080
1079 while n != bottom:
1081 while n != bottom:
1080 p = self.changelog.parents(n)[0]
1082 p = self.changelog.parents(n)[0]
1081 if i == f:
1083 if i == f:
1082 l.append(n)
1084 l.append(n)
1083 f = f * 2
1085 f = f * 2
1084 n = p
1086 n = p
1085 i += 1
1087 i += 1
1086
1088
1087 r.append(l)
1089 r.append(l)
1088
1090
1089 return r
1091 return r
1090
1092
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # nodemap serves as a fast "do we have this node?" membership test
        m = self.changelog.nodemap
        search = []        # (head, root) ranges scheduled for binary search
        fetch = {}         # roots of the missing subsets (the result)
        seen = {}          # branch heads already examined
        seenbranch = {}    # full branch tuples already examined
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        # empty local repository: everything on the remote is missing
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        # split remote heads into ones we already have (→ base) and
        # ones we need to investigate
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req tracks nodes we have already asked the remote about,
        # so each node is requested at most once
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []   # parents to query from the remote in this round
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # both parents known locally: the branch root
                            # itself is the earliest missing node
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue any still-unknown parents for the next request
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch branch queries in groups of 10 to bound the size
                # of each wire request
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            # remote.between returns exponentially-spaced samples along
            # the (head, root) segment; appending the known root lets the
            # loop below always terminate at a known node
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap of at most one node: p is the earliest
                        # missing changeset on this segment
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # still a large gap: recurse on the sub-range
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        # only nullid in base means no common history was found
        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1231
1233
1232 def findoutgoing(self, remote, base=None, heads=None, force=False):
1234 def findoutgoing(self, remote, base=None, heads=None, force=False):
1233 """Return list of nodes that are roots of subsets not in remote
1235 """Return list of nodes that are roots of subsets not in remote
1234
1236
1235 If base dict is specified, assume that these nodes and their parents
1237 If base dict is specified, assume that these nodes and their parents
1236 exist on the remote side.
1238 exist on the remote side.
1237 If a list of heads is specified, return only nodes which are heads
1239 If a list of heads is specified, return only nodes which are heads
1238 or ancestors of these heads, and return a second element which
1240 or ancestors of these heads, and return a second element which
1239 contains all remote heads which get new children.
1241 contains all remote heads which get new children.
1240 """
1242 """
1241 if base == None:
1243 if base == None:
1242 base = {}
1244 base = {}
1243 self.findincoming(remote, base, heads, force=force)
1245 self.findincoming(remote, base, heads, force=force)
1244
1246
1245 self.ui.debug(_("common changesets up to ")
1247 self.ui.debug(_("common changesets up to ")
1246 + " ".join(map(short, base.keys())) + "\n")
1248 + " ".join(map(short, base.keys())) + "\n")
1247
1249
1248 remain = dict.fromkeys(self.changelog.nodemap)
1250 remain = dict.fromkeys(self.changelog.nodemap)
1249
1251
1250 # prune everything remote has from the tree
1252 # prune everything remote has from the tree
1251 del remain[nullid]
1253 del remain[nullid]
1252 remove = base.keys()
1254 remove = base.keys()
1253 while remove:
1255 while remove:
1254 n = remove.pop(0)
1256 n = remove.pop(0)
1255 if n in remain:
1257 if n in remain:
1256 del remain[n]
1258 del remain[n]
1257 for p in self.changelog.parents(n):
1259 for p in self.changelog.parents(n):
1258 remove.append(p)
1260 remove.append(p)
1259
1261
1260 # find every node whose parents have been pruned
1262 # find every node whose parents have been pruned
1261 subset = []
1263 subset = []
1262 # find every remote head that will get new children
1264 # find every remote head that will get new children
1263 updated_heads = {}
1265 updated_heads = {}
1264 for n in remain:
1266 for n in remain:
1265 p1, p2 = self.changelog.parents(n)
1267 p1, p2 = self.changelog.parents(n)
1266 if p1 not in remain and p2 not in remain:
1268 if p1 not in remain and p2 not in remain:
1267 subset.append(n)
1269 subset.append(n)
1268 if heads:
1270 if heads:
1269 if p1 in heads:
1271 if p1 in heads:
1270 updated_heads[p1] = True
1272 updated_heads[p1] = True
1271 if p2 in heads:
1273 if p2 in heads:
1272 updated_heads[p2] = True
1274 updated_heads[p2] = True
1273
1275
1274 # this is the set of all roots we have to push
1276 # this is the set of all roots we have to push
1275 if heads:
1277 if heads:
1276 return subset, updated_heads.keys()
1278 return subset, updated_heads.keys()
1277 else:
1279 else:
1278 return subset
1280 return subset
1279
1281
1280 def pull(self, remote, heads=None, force=False, lock=None):
1282 def pull(self, remote, heads=None, force=False, lock=None):
1281 mylock = False
1283 mylock = False
1282 if not lock:
1284 if not lock:
1283 lock = self.lock()
1285 lock = self.lock()
1284 mylock = True
1286 mylock = True
1285
1287
1286 try:
1288 try:
1287 fetch = self.findincoming(remote, force=force)
1289 fetch = self.findincoming(remote, force=force)
1288 if fetch == [nullid]:
1290 if fetch == [nullid]:
1289 self.ui.status(_("requesting all changes\n"))
1291 self.ui.status(_("requesting all changes\n"))
1290
1292
1291 if not fetch:
1293 if not fetch:
1292 self.ui.status(_("no changes found\n"))
1294 self.ui.status(_("no changes found\n"))
1293 return 0
1295 return 0
1294
1296
1295 if heads is None:
1297 if heads is None:
1296 cg = remote.changegroup(fetch, 'pull')
1298 cg = remote.changegroup(fetch, 'pull')
1297 else:
1299 else:
1298 if 'changegroupsubset' not in remote.capabilities:
1300 if 'changegroupsubset' not in remote.capabilities:
1299 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1301 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1300 cg = remote.changegroupsubset(fetch, heads, 'pull')
1302 cg = remote.changegroupsubset(fetch, heads, 'pull')
1301 return self.addchangegroup(cg, 'pull', remote.url())
1303 return self.addchangegroup(cg, 'pull', remote.url())
1302 finally:
1304 finally:
1303 if mylock:
1305 if mylock:
1304 lock.release()
1306 lock.release()
1305
1307
1306 def push(self, remote, force=False, revs=None):
1308 def push(self, remote, force=False, revs=None):
1307 # there are two ways to push to remote repo:
1309 # there are two ways to push to remote repo:
1308 #
1310 #
1309 # addchangegroup assumes local user can lock remote
1311 # addchangegroup assumes local user can lock remote
1310 # repo (local filesystem, old ssh servers).
1312 # repo (local filesystem, old ssh servers).
1311 #
1313 #
1312 # unbundle assumes local user cannot lock remote repo (new ssh
1314 # unbundle assumes local user cannot lock remote repo (new ssh
1313 # servers, http servers).
1315 # servers, http servers).
1314
1316
1315 if remote.capable('unbundle'):
1317 if remote.capable('unbundle'):
1316 return self.push_unbundle(remote, force, revs)
1318 return self.push_unbundle(remote, force, revs)
1317 return self.push_addchangegroup(remote, force, revs)
1319 return self.push_addchangegroup(remote, force, revs)
1318
1320
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push and sanity-check the push.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or the push would create new
        remote heads without force.
        """
        base = {}
        remote_heads = remote.heads()
        # inc is truthy when the remote has changes we do not; base is
        # filled in place with the common nodes
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the given revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            # ancestral to an outgoing head
        
            warn = 0
        
            if remote_heads == [nullid]:
                # empty remote: any heads we push cannot be "new branches"
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # simulate the post-push remote head set and compare sizes
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # remote head not superseded by any pushed head
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1
        
            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            # NOTE: reached only with force set (elif chain), so the
            # unsynced-changes warning is informational, not an abort
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1374
1376
1375 def push_addchangegroup(self, remote, force, revs):
1377 def push_addchangegroup(self, remote, force, revs):
1376 lock = remote.lock()
1378 lock = remote.lock()
1377
1379
1378 ret = self.prepush(remote, force, revs)
1380 ret = self.prepush(remote, force, revs)
1379 if ret[0] is not None:
1381 if ret[0] is not None:
1380 cg, remote_heads = ret
1382 cg, remote_heads = ret
1381 return remote.addchangegroup(cg, 'push', self.url())
1383 return remote.addchangegroup(cg, 'push', self.url())
1382 return ret[1]
1384 return ret[1]
1383
1385
1384 def push_unbundle(self, remote, force, revs):
1386 def push_unbundle(self, remote, force, revs):
1385 # local repo finds heads on server, finds out what revs it
1387 # local repo finds heads on server, finds out what revs it
1386 # must push. once revs transferred, if server finds it has
1388 # must push. once revs transferred, if server finds it has
1387 # different heads (someone else won commit/push race), server
1389 # different heads (someone else won commit/push race), server
1388 # aborts.
1390 # aborts.
1389
1391
1390 ret = self.prepush(remote, force, revs)
1392 ret = self.prepush(remote, force, revs)
1391 if ret[0] is not None:
1393 if ret[0] is not None:
1392 cg, remote_heads = ret
1394 cg, remote_heads = ret
1393 if force: remote_heads = ['force']
1395 if force: remote_heads = ['force']
1394 return remote.unbundle(cg, remote_heads, 'push')
1396 return remote.unbundle(cg, remote_heads, 'push')
1395 return ret[1]
1397 return ret[1]
1396
1398
1397 def changegroupinfo(self, nodes):
1399 def changegroupinfo(self, nodes):
1398 self.ui.note(_("%d changesets found\n") % len(nodes))
1400 self.ui.note(_("%d changesets found\n") % len(nodes))
1399 if self.ui.debugflag:
1401 if self.ui.debugflag:
1400 self.ui.debug(_("List of changesets:\n"))
1402 self.ui.debug(_("List of changesets:\n"))
1401 for node in nodes:
1403 for node in nodes:
1402 self.ui.debug("%s\n" % hex(node))
1404 self.ui.debug("%s\n" % hex(node))
1403
1405
1404 def changegroupsubset(self, bases, heads, source):
1406 def changegroupsubset(self, bases, heads, source):
1405 """This function generates a changegroup consisting of all the nodes
1407 """This function generates a changegroup consisting of all the nodes
1406 that are descendents of any of the bases, and ancestors of any of
1408 that are descendents of any of the bases, and ancestors of any of
1407 the heads.
1409 the heads.
1408
1410
1409 It is fairly complex as determining which filenodes and which
1411 It is fairly complex as determining which filenodes and which
1410 manifest nodes need to be included for the changeset to be complete
1412 manifest nodes need to be included for the changeset to be complete
1411 is non-trivial.
1413 is non-trivial.
1412
1414
1413 Another wrinkle is doing the reverse, figuring out which changeset in
1415 Another wrinkle is doing the reverse, figuring out which changeset in
1414 the changegroup a particular filenode or manifestnode belongs to."""
1416 the changegroup a particular filenode or manifestnode belongs to."""
1415
1417
1416 self.hook('preoutgoing', throw=True, source=source)
1418 self.hook('preoutgoing', throw=True, source=source)
1417
1419
1418 # Set up some initial variables
1420 # Set up some initial variables
1419 # Make it easy to refer to self.changelog
1421 # Make it easy to refer to self.changelog
1420 cl = self.changelog
1422 cl = self.changelog
1421 # msng is short for missing - compute the list of changesets in this
1423 # msng is short for missing - compute the list of changesets in this
1422 # changegroup.
1424 # changegroup.
1423 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1425 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1424 self.changegroupinfo(msng_cl_lst)
1426 self.changegroupinfo(msng_cl_lst)
1425 # Some bases may turn out to be superfluous, and some heads may be
1427 # Some bases may turn out to be superfluous, and some heads may be
1426 # too. nodesbetween will return the minimal set of bases and heads
1428 # too. nodesbetween will return the minimal set of bases and heads
1427 # necessary to re-create the changegroup.
1429 # necessary to re-create the changegroup.
1428
1430
1429 # Known heads are the list of heads that it is assumed the recipient
1431 # Known heads are the list of heads that it is assumed the recipient
1430 # of this changegroup will know about.
1432 # of this changegroup will know about.
1431 knownheads = {}
1433 knownheads = {}
1432 # We assume that all parents of bases are known heads.
1434 # We assume that all parents of bases are known heads.
1433 for n in bases:
1435 for n in bases:
1434 for p in cl.parents(n):
1436 for p in cl.parents(n):
1435 if p != nullid:
1437 if p != nullid:
1436 knownheads[p] = 1
1438 knownheads[p] = 1
1437 knownheads = knownheads.keys()
1439 knownheads = knownheads.keys()
1438 if knownheads:
1440 if knownheads:
1439 # Now that we know what heads are known, we can compute which
1441 # Now that we know what heads are known, we can compute which
1440 # changesets are known. The recipient must know about all
1442 # changesets are known. The recipient must know about all
1441 # changesets required to reach the known heads from the null
1443 # changesets required to reach the known heads from the null
1442 # changeset.
1444 # changeset.
1443 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1445 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1444 junk = None
1446 junk = None
1445 # Transform the list into an ersatz set.
1447 # Transform the list into an ersatz set.
1446 has_cl_set = dict.fromkeys(has_cl_set)
1448 has_cl_set = dict.fromkeys(has_cl_set)
1447 else:
1449 else:
1448 # If there were no known heads, the recipient cannot be assumed to
1450 # If there were no known heads, the recipient cannot be assumed to
1449 # know about any changesets.
1451 # know about any changesets.
1450 has_cl_set = {}
1452 has_cl_set = {}
1451
1453
1452 # Make it easy to refer to self.manifest
1454 # Make it easy to refer to self.manifest
1453 mnfst = self.manifest
1455 mnfst = self.manifest
1454 # We don't know which manifests are missing yet
1456 # We don't know which manifests are missing yet
1455 msng_mnfst_set = {}
1457 msng_mnfst_set = {}
1456 # Nor do we know which filenodes are missing.
1458 # Nor do we know which filenodes are missing.
1457 msng_filenode_set = {}
1459 msng_filenode_set = {}
1458
1460
1459 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1461 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1460 junk = None
1462 junk = None
1461
1463
1462 # A changeset always belongs to itself, so the changenode lookup
1464 # A changeset always belongs to itself, so the changenode lookup
1463 # function for a changenode is identity.
1465 # function for a changenode is identity.
1464 def identity(x):
1466 def identity(x):
1465 return x
1467 return x
1466
1468
1467 # A function generating function. Sets up an environment for the
1469 # A function generating function. Sets up an environment for the
1468 # inner function.
1470 # inner function.
1469 def cmp_by_rev_func(revlog):
1471 def cmp_by_rev_func(revlog):
1470 # Compare two nodes by their revision number in the environment's
1472 # Compare two nodes by their revision number in the environment's
1471 # revision history. Since the revision number both represents the
1473 # revision history. Since the revision number both represents the
1472 # most efficient order to read the nodes in, and represents a
1474 # most efficient order to read the nodes in, and represents a
1473 # topological sorting of the nodes, this function is often useful.
1475 # topological sorting of the nodes, this function is often useful.
1474 def cmp_by_rev(a, b):
1476 def cmp_by_rev(a, b):
1475 return cmp(revlog.rev(a), revlog.rev(b))
1477 return cmp(revlog.rev(a), revlog.rev(b))
1476 return cmp_by_rev
1478 return cmp_by_rev
1477
1479
1478 # If we determine that a particular file or manifest node must be a
1480 # If we determine that a particular file or manifest node must be a
1479 # node that the recipient of the changegroup will already have, we can
1481 # node that the recipient of the changegroup will already have, we can
1480 # also assume the recipient will have all the parents. This function
1482 # also assume the recipient will have all the parents. This function
1481 # prunes them from the set of missing nodes.
1483 # prunes them from the set of missing nodes.
1482 def prune_parents(revlog, hasset, msngset):
1484 def prune_parents(revlog, hasset, msngset):
1483 haslst = hasset.keys()
1485 haslst = hasset.keys()
1484 haslst.sort(cmp_by_rev_func(revlog))
1486 haslst.sort(cmp_by_rev_func(revlog))
1485 for node in haslst:
1487 for node in haslst:
1486 parentlst = [p for p in revlog.parents(node) if p != nullid]
1488 parentlst = [p for p in revlog.parents(node) if p != nullid]
1487 while parentlst:
1489 while parentlst:
1488 n = parentlst.pop()
1490 n = parentlst.pop()
1489 if n not in hasset:
1491 if n not in hasset:
1490 hasset[n] = 1
1492 hasset[n] = 1
1491 p = [p for p in revlog.parents(n) if p != nullid]
1493 p = [p for p in revlog.parents(n) if p != nullid]
1492 parentlst.extend(p)
1494 parentlst.extend(p)
1493 for n in hasset:
1495 for n in hasset:
1494 msngset.pop(n, None)
1496 msngset.pop(n, None)
1495
1497
1496 # This is a function generating function used to set up an environment
1498 # This is a function generating function used to set up an environment
1497 # for the inner function to execute in.
1499 # for the inner function to execute in.
1498 def manifest_and_file_collector(changedfileset):
1500 def manifest_and_file_collector(changedfileset):
1499 # This is an information gathering function that gathers
1501 # This is an information gathering function that gathers
1500 # information from each changeset node that goes out as part of
1502 # information from each changeset node that goes out as part of
1501 # the changegroup. The information gathered is a list of which
1503 # the changegroup. The information gathered is a list of which
1502 # manifest nodes are potentially required (the recipient may
1504 # manifest nodes are potentially required (the recipient may
1503 # already have them) and total list of all files which were
1505 # already have them) and total list of all files which were
1504 # changed in any changeset in the changegroup.
1506 # changed in any changeset in the changegroup.
1505 #
1507 #
1506 # We also remember the first changenode we saw any manifest
1508 # We also remember the first changenode we saw any manifest
1507 # referenced by so we can later determine which changenode 'owns'
1509 # referenced by so we can later determine which changenode 'owns'
1508 # the manifest.
1510 # the manifest.
1509 def collect_manifests_and_files(clnode):
1511 def collect_manifests_and_files(clnode):
1510 c = cl.read(clnode)
1512 c = cl.read(clnode)
1511 for f in c[3]:
1513 for f in c[3]:
1512 # This is to make sure we only have one instance of each
1514 # This is to make sure we only have one instance of each
1513 # filename string for each filename.
1515 # filename string for each filename.
1514 changedfileset.setdefault(f, f)
1516 changedfileset.setdefault(f, f)
1515 msng_mnfst_set.setdefault(c[0], clnode)
1517 msng_mnfst_set.setdefault(c[0], clnode)
1516 return collect_manifests_and_files
1518 return collect_manifests_and_files
1517
1519
1518 # Figure out which manifest nodes (of the ones we think might be part
1520 # Figure out which manifest nodes (of the ones we think might be part
1519 # of the changegroup) the recipient must know about and remove them
1521 # of the changegroup) the recipient must know about and remove them
1520 # from the changegroup.
1522 # from the changegroup.
1521 def prune_manifests():
1523 def prune_manifests():
1522 has_mnfst_set = {}
1524 has_mnfst_set = {}
1523 for n in msng_mnfst_set:
1525 for n in msng_mnfst_set:
1524 # If a 'missing' manifest thinks it belongs to a changenode
1526 # If a 'missing' manifest thinks it belongs to a changenode
1525 # the recipient is assumed to have, obviously the recipient
1527 # the recipient is assumed to have, obviously the recipient
1526 # must have that manifest.
1528 # must have that manifest.
1527 linknode = cl.node(mnfst.linkrev(n))
1529 linknode = cl.node(mnfst.linkrev(n))
1528 if linknode in has_cl_set:
1530 if linknode in has_cl_set:
1529 has_mnfst_set[n] = 1
1531 has_mnfst_set[n] = 1
1530 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1532 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1531
1533
1532 # Use the information collected in collect_manifests_and_files to say
1534 # Use the information collected in collect_manifests_and_files to say
1533 # which changenode any manifestnode belongs to.
1535 # which changenode any manifestnode belongs to.
1534 def lookup_manifest_link(mnfstnode):
1536 def lookup_manifest_link(mnfstnode):
1535 return msng_mnfst_set[mnfstnode]
1537 return msng_mnfst_set[mnfstnode]
1536
1538
1537 # A function generating function that sets up the initial environment
1539 # A function generating function that sets up the initial environment
1538 # the inner function.
# Factory: returns the callback handed to the manifest group generator so
# that, while manifests stream out, we learn which filenodes (and their
# owning changenodes) must also travel in the changegroup.
def filenode_collector(changedfiles):
    # One-element list acts as a mutable cell: the manifest revision we
    # expect next.  A sequential walk lets us read a cheap delta instead
    # of a full manifest.
    next_rev = [0]

    def collect_msng_filenodes(mnfstnode):
        # Record every filenode referenced by this manifest node.  A
        # filenode is attributed to the changenode of the *first*
        # manifest that mentions it.
        r = mnfst.rev(mnfstnode)
        if r == next_rev[0]:
            # Sequential case: the delta against the previous revision
            # lists exactly the changed "name\0hexnode" entries.
            for entry in mdiff.patchtext(mnfst.delta(mnfstnode)).splitlines():
                fname, hexnode = entry.split('\0')
                filenode = bin(hexnode[:40])
                fname = changedfiles.get(fname, None)
                if fname is None:
                    # Not a file we were asked to track.
                    continue
                # Changenode this manifest belongs to.
                owner = msng_mnfst_set[mnfstnode]
                # First manifest to mention a filenode wins; later
                # setdefault calls are no-ops.
                ndset = msng_filenode_set.setdefault(fname, {})
                ndset.setdefault(filenode, owner)
        else:
            # Out of sequence: fall back to reading the full manifest.
            m = mnfst.read(mnfstnode)
            for fname in changedfiles:
                filenode = m.get(fname, None)
                if filenode is not None:
                    # Same bookkeeping as the delta branch above.
                    owner = msng_mnfst_set[mnfstnode]
                    ndset = msng_filenode_set.setdefault(fname, {})
                    ndset.setdefault(filenode, owner)
        # Expect the following revision on the next callback.
        next_rev[0] = r + 1

    return collect_msng_filenodes
1586
1588
# We hold a candidate list of "missing" filenodes for one file; discard
# every node the recipient must already have.
def prune_filenodes(f, filerevlog):
    msngset = msng_filenode_set[f]
    # A candidate filenode whose linked changenode is known to exist on
    # the remote side must itself already be on the remote side.
    hasset = dict.fromkeys(
        [n for n in msngset
         if cl.node(filerevlog.linkrev(n)) in has_cl_set], 1)
    # Propagate "has" status through ancestry and drop from msngset.
    prune_parents(filerevlog, hasset, msngset)
1600
1602
# Factory that binds a file name so the group generator can translate a
# filenode into the changenode that owns it.
def lookup_filenode_link_func(fname):
    msngset = msng_filenode_set[fname]

    def lookup_filenode_link(fnode):
        # The owning changenode was recorded while scanning manifests.
        return msngset[fnode]

    return lookup_filenode_link
1609
1611
# With all the helpers above in place, do the actual generation: one
# changelog section, one manifest section, then a named section per
# changed file — all produced lazily.
def gengroup():
    # Files touched by the outgoing changesets; filled in as a side
    # effect while the changelog group is generated.
    changedfiles = {}
    grp = cl.group(msng_cl_lst, identity,
                   manifest_and_file_collector(changedfiles))
    for chunk in grp:
        yield chunk

    # The changelog walk recorded candidate manifest nodes; trim the
    # ones the recipient already has and emit the rest in revision
    # order, collecting filenode info as we go.
    prune_manifests()
    msng_mnfst_lst = msng_mnfst_set.keys()
    msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
    grp = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                      filenode_collector(changedfiles))
    for chunk in grp:
        yield chunk

    # Manifest bookkeeping is finished; release the memory early.
    msng_mnfst_lst = None
    msng_mnfst_set.clear()

    changedfiles = changedfiles.keys()
    changedfiles.sort()
    # One section per file, in sorted name order.
    for fname in changedfiles:
        filerevlog = self.file(fname)
        # Drop the filenodes the recipient is not actually missing.
        if msng_filenode_set.has_key(fname):
            prune_filenodes(fname, filerevlog)
            fnodes = msng_filenode_set[fname].keys()
        else:
            fnodes = []
        # Only emit a section when something is left to send.
        if len(fnodes) > 0:
            yield changegroup.genchunk(fname)
            # Filenodes travel in revision order.
            fnodes.sort(cmp_by_rev_func(filerevlog))
            # Only a changenode-lookup callback is needed here; no
            # extra information is collected from filenodes.
            grp = filerevlog.group(fnodes,
                                   lookup_filenode_link_func(fname))
            for chunk in grp:
                yield chunk
        if msng_filenode_set.has_key(fname):
            # Free the per-file node map as soon as it is consumed.
            del msng_filenode_set[fname]
    # Tell the receiver no more groups follow.
    yield changegroup.closechunk()
1670
1672
1671 if msng_cl_lst:
1673 if msng_cl_lst:
1672 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1674 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1673
1675
1674 return util.chunkbuffer(gengroup())
1676 return util.chunkbuffer(gengroup())
1675
1677
def changegroup(self, basenodes, source):
    """Build a changegroup of every node we have that a recipient lacks.

    Much simpler than changegroupsubset: the recipient is assumed to
    already have any changenode we do not send.  Returns a chunkbuffer
    over the lazily generated group.
    """
    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    nodes = cl.nodesbetween(basenodes, None)[0]
    # Changelog revision numbers being transmitted, used to filter
    # manifest and file revisions down to the outgoing ones.
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes)

    def identity(x):
        # Changelog nodes are their own link nodes.
        return x

    def gennodelst(revlog):
        # Yield the nodes of revlog whose linked changeset is outgoing.
        for rev in xrange(0, revlog.count()):
            node = revlog.node(rev)
            if revlog.linkrev(node) in revset:
                yield node

    def changed_file_collector(changedfileset):
        # Side-effect callback: note every file touched by a changeset.
        def collect_changed_files(clnode):
            for fname in cl.read(clnode)[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Map a revlog node back to the changelog node it links to.
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(n))
        return lookuprevlink

    def gengroup():
        # Changelog chunks first; the touched-file set is filled in as
        # a side effect of the group walk.
        changedfiles = {}
        collector = changed_file_collector(changedfiles)
        for chunk in cl.group(nodes, identity, collector):
            yield chunk
        changedfiles = changedfiles.keys()
        changedfiles.sort()

        # Then the manifest chunks.
        mnfst = self.manifest
        for chunk in mnfst.group(gennodelst(mnfst),
                                 lookuprevlink_func(mnfst)):
            yield chunk

        # Finally one named section per changed file, skipping files
        # with no outgoing revisions.
        for fname in changedfiles:
            filerevlog = self.file(fname)
            nodeiter = list(gennodelst(filerevlog))
            if nodeiter:
                yield changegroup.genchunk(fname)
                lookup = lookuprevlink_func(filerevlog)
                for chunk in filerevlog.group(nodeiter, lookup):
                    yield chunk

        # Terminating marker: no more groups follow.
        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1742
1744
def addchangegroup(self, source, srctype, url):
    """add changegroup to repo.

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    # Both callbacks close over `cl`, which is bound to the temporary
    # append-only changelog further down.
    def csmap(x):
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    tr = self.transaction()

    # Write changelog data to temp files so concurrent readers never
    # observe an inconsistent view of the repository.
    cl = None
    try:
        cl = appendfile.appendchangelog(self.sopener,
                                        self.changelog.version)

        oldheads = len(cl.heads())

        # Pull off the changeset group.
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1
        chunks = changegroup.chunkiter(source)
        if cl.addgroup(chunks, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # Pull off the manifest group.
        self.ui.status(_("adding manifests\n"))
        chunks = changegroup.chunkiter(source)
        # No emptiness check here: if the merge of 1 and 2 produced the
        # same manifest in 3 and 4, no new manifest node is created and
        # the manifest group is legitimately empty during a pull.
        self.manifest.addgroup(chunks, revmap, tr)

        # Process the per-file sections until the terminating chunk.
        self.ui.status(_("adding file changes\n"))
        while True:
            fname = changegroup.getchunk(source)
            if not fname:
                break
            self.ui.debug(_("adding %s revisions\n") % fname)
            flog = self.file(fname)
            prevcount = flog.count()
            chunks = changegroup.chunkiter(source)
            if flog.addgroup(chunks, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += flog.count() - prevcount
            files += 1

        cl.writedata()
    finally:
        # Always dispose of the temporary changelog files.
        if cl:
            cl.cleanup()

    # Make the changelog see the real files again.
    self.changelog = changelog.changelog(self.sopener,
                                         self.changelog.version)
    self.changelog.checkinlinesize(tr)

    newheads = len(self.changelog.heads())
    headnote = ""
    if oldheads and newheads != oldheads:
        headnote = _(" (%+d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, headnote))

    if changesets > 0:
        # cor+1 is the first newly added changeset.
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)), source=srctype,
                  url=url)

    tr.close()

    if changesets > 0:
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                  source=srctype, url=url)

        for i in xrange(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # Never return 0 here: callers distinguish "nothing happened".
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1849
1851
1850
1852
def stream_in(self, remote):
    """Populate this repository by copying raw store files from *remote*.

    Speaks the stream_out protocol: a numeric status line, a
    "<total files> <total bytes>" line, then for each file a
    "<name>\\0<size>" header followed by exactly <size> bytes of data.

    Returns len(self.heads()) + 1 so the result can be interpreted
    like pull's "new heads" return value.

    Raises util.Abort when the server refuses (status 1, 2 or any
    other non-zero code) and util.UnexpectedOutput when a protocol
    line cannot be parsed.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # BUG FIX: this previously read "except ValueError, TypeError:",
    # which catches only ValueError and rebinds the builtin TypeError
    # name to the exception instance; the tuple form catches both.
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):  # same fix as above
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # Guard the rate computation against a zero/negative interval
        # (fast transfers, coarse clocks) to avoid ZeroDivisionError.
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.reload()
    return len(self.heads()) + 1
1895
1897
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible

    Fix: the default for heads used to be a shared mutable list
    (the mutable-default-argument pitfall); None is now normalized
    to a fresh empty list, which preserves the old behavior.
    '''
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
1914
1916
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are snapshotted as plain tuples immediately, so the
    returned closure keeps no reference to the caller's list.
    """
    pending = [tuple(pair) for pair in files]

    def a():
        for source_path, dest_path in pending:
            util.rename(source_path, dest_path)

    return a
1922
1924
def instance(ui, path, create):
    """repo-type entry point: open (or create) the local repository at path."""
    # Peel off a leading "file:" scheme before handing the plain
    # filesystem path to the repository constructor.
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1925
1927
def islocal(path):
    """A local repository is, by definition, always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now